diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a07c4a7..de435b35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -137,60 +137,6 @@ jobs: run: | make test-e2e-cache-btc-timestamping - e2e-run-btc-timestamping-phase-2-hermes: - needs: [e2e-docker-build-babylon] - runs-on: ubuntu-22.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Download babylon artifact - uses: actions/download-artifact@v4 - with: - name: babylond-${{ github.sha }} - path: /tmp - - name: Docker load babylond - run: | - docker load < /tmp/docker-babylond.tar.gz - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Cache Go - uses: actions/setup-go@v5 - with: - go-version: 1.21 - - name: Run e2e TestBTCTimestampingPhase2HermesTestSuite - run: | - make test-e2e-cache-btc-timestamping-phase-2-hermes - - e2e-run-btc-timestamping-phase-2-rly: - needs: [e2e-docker-build-babylon] - runs-on: ubuntu-22.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Download babylon artifact - uses: actions/download-artifact@v4 - with: - name: babylond-${{ github.sha }} - path: /tmp - - name: Docker load babylond - run: | - docker load < /tmp/docker-babylond.tar.gz - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Cache Go - uses: actions/setup-go@v5 - with: - go-version: 1.21 - - name: Run e2e TestBTCTimestampingPhase2RlyTestSuite - run: | - make test-e2e-cache-btc-timestamping-phase-2-rly - e2e-run-btc-staking: needs: [e2e-docker-build-babylon] runs-on: ubuntu-22.04 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b64f1d5..e95dc2d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,8 +37,28 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## Unreleased +### State Machine Breaking + +* [#107](https://github.com/babylonlabs-io/babylon/pull/107) Implement ADR-027 and +enable in-protocol minimum gas price +* [#103](https://github.com/babylonlabs-io/babylon/pull/103) Add token distribution +to upgrade handler and rename `signet-launch` to `v1` +* [#55](https://github.com/babylonlabs-io/babylon/pull/55) Remove `x/zoneconcierge` +module + +### Bug fixes + +### Misc Improvements + +* [#106](https://github.com/babylonlabs-io/babylon/pull/106) Add CLI command for + querying signing info of finality providers. + +## v0.10.1 + ### Bug Fixes +* [#91](https://github.com/babylonlabs-io/babylon/pull/91) fix testnet command +by add ibc default gen state and min gas price specification of `1ubbn` * [#93](https://github.com/babylonlabs-io/babylon/pull/93) fix genesis epoch initialization. diff --git a/app/ante.go b/app/ante.go deleted file mode 100644 index 1cd6b9de..00000000 --- a/app/ante.go +++ /dev/null @@ -1,24 +0,0 @@ -package app - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// WrappedAnteHandler is the wrapped AnteHandler that implements the `AnteDecorator` interface, which has a single function `AnteHandle`. -// It allows us to chain an existing AnteHandler with other decorators by using `sdk.ChainAnteDecorators`. -type WrappedAnteHandler struct { - ah sdk.AnteHandler -} - -// NewWrappedAnteHandler creates a new WrappedAnteHandler for a given AnteHandler. 
-func NewWrappedAnteHandler(ah sdk.AnteHandler) WrappedAnteHandler { - return WrappedAnteHandler{ah} -} - -func (wah WrappedAnteHandler) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { - newCtx, err = wah.ah(ctx, tx, simulate) - if err != nil { - return newCtx, err - } - return next(newCtx, tx, simulate) -} diff --git a/app/ante/ante.go b/app/ante/ante.go new file mode 100644 index 00000000..cd7b83a8 --- /dev/null +++ b/app/ante/ante.go @@ -0,0 +1,91 @@ +package ante + +import ( + "cosmossdk.io/core/store" + circuitkeeper "cosmossdk.io/x/circuit/keeper" + txsigning "cosmossdk.io/x/tx/signing" + wasmapp "github.com/CosmWasm/wasmd/app" + wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + bbn "github.com/babylonlabs-io/babylon/types" + btcckeeper "github.com/babylonlabs-io/babylon/x/btccheckpoint/keeper" + epochingkeeper "github.com/babylonlabs-io/babylon/x/epoching/keeper" + sdk "github.com/cosmos/cosmos-sdk/types" + authante "github.com/cosmos/cosmos-sdk/x/auth/ante" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + ibckeeper "github.com/cosmos/ibc-go/v8/modules/core/keeper" +) + +// NewAnteHandler creates a new AnteHandler for the Babylon chain. +func NewAnteHandler( + accountKeeper authante.AccountKeeper, + bankKeeper authtypes.BankKeeper, + feegrantKeeper authante.FeegrantKeeper, + signModeHandler *txsigning.HandlerMap, + ibcKeeper *ibckeeper.Keeper, + wasmConfig *wasmtypes.WasmConfig, + wasmKeeper *wasmkeeper.Keeper, + circuitKeeper *circuitkeeper.Keeper, + epochingKeeper *epochingkeeper.Keeper, + btcConfig *bbn.BtcConfig, + btccKeeper *btcckeeper.Keeper, + txCounterStoreService store.KVStoreService, +) sdk.AnteHandler { + // initialize AnteHandler, which includes + // - authAnteHandler + // - custom wasm ante handler NewLimitSimulationGasDecorator and NewCountTXDecorator + // - Extra decorators introduced in Babylon, such as DropValidatorMsgDecorator that delays validator-related messages + // + // We are using constructor from wasmapp as it introduces custom wasm ante handle decorators + // early in chain of ante handlers. + authAnteHandler, err := wasmapp.NewAnteHandler( + wasmapp.HandlerOptions{ + HandlerOptions: authante.HandlerOptions{ + AccountKeeper: accountKeeper, + BankKeeper: bankKeeper, + SignModeHandler: signModeHandler, + FeegrantKeeper: feegrantKeeper, + SigGasConsumer: authante.DefaultSigVerificationGasConsumer, + // CheckTxFeeWithGlobalMinGasPrices will enforce the global minimum + // gas price for all transactions. + TxFeeChecker: CheckTxFeeWithGlobalMinGasPrices, + }, + IBCKeeper: ibcKeeper, + WasmConfig: wasmConfig, + TXCounterStoreService: txCounterStoreService, + WasmKeeper: wasmKeeper, + CircuitKeeper: circuitKeeper, + }, + ) + + if err != nil { + panic(err) + } + + anteHandler := sdk.ChainAnteDecorators( + NewWrappedAnteHandler(authAnteHandler), + epochingkeeper.NewDropValidatorMsgDecorator(epochingKeeper), + NewBtcValidationDecorator(btcConfig, btccKeeper), + ) + + return anteHandler +} + +// WrappedAnteHandler is the wrapped AnteHandler that implements the `AnteDecorator` interface, which has a single function `AnteHandle`. +// It allows us to chain an existing AnteHandler with other decorators by using `sdk.ChainAnteDecorators`. +type WrappedAnteHandler struct { + ah sdk.AnteHandler +} + +// NewWrappedAnteHandler creates a new WrappedAnteHandler for a given AnteHandler. 
+func NewWrappedAnteHandler(ah sdk.AnteHandler) WrappedAnteHandler { + return WrappedAnteHandler{ah} +} + +func (wah WrappedAnteHandler) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { + newCtx, err = wah.ah(ctx, tx, simulate) + if err != nil { + return newCtx, err + } + return next(newCtx, tx, simulate) +} diff --git a/app/ante_btc_validation_decorator.go b/app/ante/ante_btc_validation_decorator.go similarity index 96% rename from app/ante_btc_validation_decorator.go rename to app/ante/ante_btc_validation_decorator.go index 8e5febfb..ffe7d21c 100644 --- a/app/ante_btc_validation_decorator.go +++ b/app/ante/ante_btc_validation_decorator.go @@ -1,4 +1,4 @@ -package app +package ante import ( bbn "github.com/babylonlabs-io/babylon/types" @@ -9,12 +9,12 @@ import ( ) type BtcValidationDecorator struct { - BtcCfg bbn.BtcConfig + BtcCfg *bbn.BtcConfig btccheckpointKeeper *btccheckpointkeeper.Keeper } func NewBtcValidationDecorator( - cfg bbn.BtcConfig, + cfg *bbn.BtcConfig, k *btccheckpointkeeper.Keeper, ) BtcValidationDecorator { return BtcValidationDecorator{ diff --git a/app/ante/fee_checker.go b/app/ante/fee_checker.go new file mode 100644 index 00000000..48118af2 --- /dev/null +++ b/app/ante/fee_checker.go @@ -0,0 +1,66 @@ +package ante + +import ( + "fmt" + + errors "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + appparams "github.com/babylonlabs-io/babylon/app/params" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerror "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + // priorityScalingFactor is a scaling factor to convert the gas price to a priority. + priorityScalingFactor = 1_000_000 +) + +// CheckTxFeeWithGlobalMinGasPrices implements the default fee logic, where the minimum price per +// unit of gas is fixed and set globally, and the tx priority is computed from the gas price. +// adapted from https://github.com/celestiaorg/celestia-app/pull/2985 +func CheckTxFeeWithGlobalMinGasPrices(ctx sdk.Context, tx sdk.Tx) (sdk.Coins, int64, error) { + feeTx, ok := tx.(sdk.FeeTx) + if !ok { + return nil, 0, errors.Wrap(sdkerror.ErrTxDecode, "Tx must be a FeeTx") + } + + denom := appparams.DefaultBondDenom + + fee := feeTx.GetFee().AmountOf(denom) + gas := feeTx.GetGas() + + // convert the global minimum gas price to a big.Int + globalMinGasPrice, err := sdkmath.LegacyNewDecFromStr(fmt.Sprintf("%f", appparams.GlobalMinGasPrice)) + if err != nil { + return nil, 0, errors.Wrap(err, "invalid GlobalMinGasPrice") + } + + gasInt := sdkmath.NewIntFromUint64(gas) + minFee := globalMinGasPrice.MulInt(gasInt).RoundInt() + + if !fee.GTE(minFee) { + return nil, 0, errors.Wrapf(sdkerror.ErrInsufficientFee, "insufficient fees; got: %s required: %s", fee, minFee) + } + + priority := getTxPriority(feeTx.GetFee(), int64(gas)) + return feeTx.GetFee(), priority, nil +} + +// getTxPriority returns a naive tx priority based on the amount of the smallest denomination of the gas price +// provided in a transaction. +// NOTE: This implementation should not be used for txs with multiple coins. 
+func getTxPriority(fee sdk.Coins, gas int64) int64 { + var priority int64 + for _, c := range fee { + p := c.Amount.Mul(sdkmath.NewInt(priorityScalingFactor)).QuoRaw(gas) + if !p.IsInt64() { + continue + } + // take the lowest priority as the tx priority + if priority == 0 || p.Int64() < priority { + priority = p.Int64() + } + } + + return priority +} diff --git a/app/ante/fee_checker_test.go b/app/ante/fee_checker_test.go new file mode 100644 index 00000000..35d02516 --- /dev/null +++ b/app/ante/fee_checker_test.go @@ -0,0 +1,100 @@ +package ante_test + +import ( + "math" + "testing" + + bbnapp "github.com/babylonlabs-io/babylon/app" + "github.com/babylonlabs-io/babylon/app/ante" + appparams "github.com/babylonlabs-io/babylon/app/params" + "github.com/babylonlabs-io/babylon/testutil/datagen" + sdk "github.com/cosmos/cosmos-sdk/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/stretchr/testify/require" +) + +// TestCheckTxFeeWithGlobalMinGasPrices tests the CheckTxFeeWithGlobalMinGasPrices +// function +// adapted from https://github.com/celestiaorg/celestia-app/pull/2985 +func TestCheckTxFeeWithGlobalMinGasPrices(t *testing.T) { + encCfg := bbnapp.GetEncodingConfig() + + builder := encCfg.TxConfig.NewTxBuilder() + err := builder.SetMsgs( + banktypes.NewMsgSend( + datagen.GenRandomAccount().GetAddress(), + datagen.GenRandomAccount().GetAddress(), + sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, 10)), + ), + ) + require.NoError(t, err) + + feeAmount := int64(1000) + ctx := sdk.Context{} + + testCases := []struct { + name string + fee sdk.Coins + gasLimit uint64 + appVersion uint64 + expErr bool + }{ + { + name: "bad tx; fee below required minimum", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, feeAmount-1)), + gasLimit: uint64(float64(feeAmount) / appparams.GlobalMinGasPrice), + appVersion: uint64(2), + expErr: true, + }, + { + name: "good tx; fee equal to required minimum", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, feeAmount)), + gasLimit: uint64(float64(feeAmount) / appparams.GlobalMinGasPrice), + appVersion: uint64(2), + expErr: false, + }, + { + name: "good tx; fee above required minimum", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, feeAmount+1)), + gasLimit: uint64(float64(feeAmount) / appparams.GlobalMinGasPrice), + appVersion: uint64(2), + expErr: false, + }, + { + name: "good tx; gas limit and fee are maximum values", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, math.MaxInt64)), + gasLimit: math.MaxUint64, + appVersion: uint64(2), + expErr: false, + }, + { + name: "bad tx; gas limit and fee are 0", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, 0)), + gasLimit: 0, + appVersion: uint64(2), + expErr: false, + }, + { + name: "good tx; minFee = 0.8, rounds up to 1", + fee: sdk.NewCoins(sdk.NewInt64Coin(appparams.DefaultBondDenom, feeAmount)), + gasLimit: 400, + appVersion: uint64(2), + expErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + builder.SetGasLimit(tc.gasLimit) + builder.SetFeeAmount(tc.fee) + tx := builder.GetTx() + + _, _, err := ante.CheckTxFeeWithGlobalMinGasPrices(ctx, tx) + if tc.expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/app/ante/get_tx_priority_test.go b/app/ante/get_tx_priority_test.go new file mode 100644 index 00000000..d4859dd1 --- /dev/null +++ b/app/ante/get_tx_priority_test.go @@ -0,0 +1,60 @@ +package ante + +import ( + 
"testing" + + appparams "github.com/babylonlabs-io/babylon/app/params" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" +) + +// TestGetTxPriority tests the getTxPriority function +// adapted from https://github.com/celestiaorg/celestia-app/pull/2985 +func TestGetTxPriority(t *testing.T) { + denom := appparams.DefaultBondDenom + + cases := []struct { + name string + fee sdk.Coins + gas int64 + expectedPri int64 + }{ + { + name: "1 bbn fee large gas", + fee: sdk.NewCoins(sdk.NewInt64Coin(denom, 1_000_000)), + gas: 1000000, + expectedPri: 1000000, + }, + { + name: "1 ubbn fee small gas", + fee: sdk.NewCoins(sdk.NewInt64Coin(denom, 1)), + gas: 1, + expectedPri: 1000000, + }, + { + name: "2 ubbn fee small gas", + fee: sdk.NewCoins(sdk.NewInt64Coin(denom, 2)), + gas: 1, + expectedPri: 2000000, + }, + { + name: "1_000_000 bbn fee normal gas tx", + fee: sdk.NewCoins(sdk.NewInt64Coin(denom, 1_000_000_000_000)), + gas: 75000, + expectedPri: 13333333333333, + }, + { + name: "0.001 ubbn gas price", + fee: sdk.NewCoins(sdk.NewInt64Coin(denom, 1_000)), + gas: 1_000_000, + expectedPri: 1000, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + pri := getTxPriority(tc.fee, tc.gas) + assert.Equal(t, tc.expectedPri, pri) + }) + } +} diff --git a/app/app.go b/app/app.go index 8eadadc7..601f0eda 100644 --- a/app/app.go +++ b/app/app.go @@ -20,7 +20,6 @@ import ( feegrantmodule "cosmossdk.io/x/feegrant/module" "cosmossdk.io/x/upgrade" upgradetypes "cosmossdk.io/x/upgrade/types" - wasmapp "github.com/CosmWasm/wasmd/app" "github.com/CosmWasm/wasmd/x/wasm" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" @@ -47,7 +46,6 @@ import ( "github.com/cosmos/cosmos-sdk/types/msgservice" "github.com/cosmos/cosmos-sdk/version" "github.com/cosmos/cosmos-sdk/x/auth" - "github.com/cosmos/cosmos-sdk/x/auth/ante" authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec" authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" @@ -93,6 +91,7 @@ import ( ibctm "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" "github.com/spf13/cast" + "github.com/babylonlabs-io/babylon/app/ante" "github.com/babylonlabs-io/babylon/app/upgrades" bbn "github.com/babylonlabs-io/babylon/types" @@ -108,7 +107,6 @@ import ( "github.com/babylonlabs-io/babylon/x/checkpointing" checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" "github.com/babylonlabs-io/babylon/x/epoching" - epochingkeeper "github.com/babylonlabs-io/babylon/x/epoching/keeper" epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" "github.com/babylonlabs-io/babylon/x/finality" finalitytypes "github.com/babylonlabs-io/babylon/x/finality/types" @@ -116,9 +114,6 @@ import ( incentivetypes "github.com/babylonlabs-io/babylon/x/incentive/types" "github.com/babylonlabs-io/babylon/x/monitor" monitortypes "github.com/babylonlabs-io/babylon/x/monitor/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge" - zckeeper "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" ) const ( @@ -313,7 +308,6 @@ func NewBabylonApp( btccheckpoint.NewAppModule(appCodec, app.BtcCheckpointKeeper), checkpointing.NewAppModule(appCodec, app.CheckpointingKeeper), monitor.NewAppModule(appCodec, app.MonitorKeeper), - zoneconcierge.NewAppModule(appCodec, app.ZoneConciergeKeeper, app.AccountKeeper, app.BankKeeper), // Babylon modules - 
btc staking btcstaking.NewAppModule(appCodec, app.BTCStakingKeeper), finality.NewAppModule(appCodec, app.FinalityKeeper), @@ -368,7 +362,6 @@ func NewBabylonApp( ibcexported.ModuleName, ibcwasmtypes.ModuleName, ibctransfertypes.ModuleName, - zctypes.ModuleName, ibcfeetypes.ModuleName, wasmtypes.ModuleName, // BTC staking related modules @@ -397,7 +390,6 @@ func NewBabylonApp( ibcexported.ModuleName, ibcwasmtypes.ModuleName, ibctransfertypes.ModuleName, - zctypes.ModuleName, ibcfeetypes.ModuleName, wasmtypes.ModuleName, // BTC staking related modules @@ -430,7 +422,6 @@ func NewBabylonApp( ibcexported.ModuleName, ibcwasmtypes.ModuleName, ibctransfertypes.ModuleName, - zctypes.ModuleName, ibcfeetypes.ModuleName, wasmtypes.ModuleName, // BTC staking related modules @@ -478,38 +469,20 @@ func NewBabylonApp( app.MountTransientStores(app.GetTransientStoreKeys()) app.MountMemoryStores(app.GetMemoryStoreKeys()) - // initialize AnteHandler, which includes - // - authAnteHandler - // - custom wasm ante handler NewLimitSimulationGasDecorator and NewCountTXDecorator - // - Extra decorators introduced in Babylon, such as DropValidatorMsgDecorator that delays validator-related messages - // - // We are using constructor from wasmapp as it introduces custom wasm ante handle decorators - // early in chain of ante handlers. - authAnteHandler, err := wasmapp.NewAnteHandler( - wasmapp.HandlerOptions{ - HandlerOptions: ante.HandlerOptions{ - AccountKeeper: app.AccountKeeper, - BankKeeper: app.BankKeeper, - SignModeHandler: txConfig.SignModeHandler(), - FeegrantKeeper: app.FeeGrantKeeper, - SigGasConsumer: ante.DefaultSigVerificationGasConsumer, - }, - IBCKeeper: app.IBCKeeper, - WasmConfig: &wasmConfig, - TXCounterStoreService: runtime.NewKVStoreService(app.AppKeepers.GetKey(wasmtypes.StoreKey)), - WasmKeeper: &app.WasmKeeper, - CircuitKeeper: &app.CircuitKeeper, - }, - ) - - if err != nil { - panic(err) - } - - anteHandler := sdk.ChainAnteDecorators( - NewWrappedAnteHandler(authAnteHandler), - epochingkeeper.NewDropValidatorMsgDecorator(app.EpochingKeeper), - NewBtcValidationDecorator(btcConfig, &app.BtcCheckpointKeeper), + // initialize AnteHandler for the app + anteHandler := ante.NewAnteHandler( + &app.AccountKeeper, + app.BankKeeper, + &app.FeeGrantKeeper, + txConfig.SignModeHandler(), + app.IBCKeeper, + &wasmConfig, + &app.WasmKeeper, + &app.CircuitKeeper, + &app.EpochingKeeper, + &btcConfig, + &app.BtcCheckpointKeeper, + runtime.NewKVStoreService(app.AppKeepers.GetKey(wasmtypes.StoreKey)), ) // set proposal extension @@ -540,12 +513,6 @@ func NewBabylonApp( app.SetEndBlocker(app.EndBlocker) app.SetAnteHandler(anteHandler) - // set postHandler - postHandler := sdk.ChainPostDecorators( - zckeeper.NewIBCHeaderDecorator(app.ZoneConciergeKeeper), - ) - app.SetPostHandler(postHandler) - // must be before Loading version // requires the snapshot store to be created and registered as a BaseAppOption // see cmd/wasmd/root.go: 206 - 214 approx diff --git a/app/e2e_include_upgrades.go b/app/e2e_include_upgrades.go index 6f5770be..4c6e5818 100644 --- a/app/e2e_include_upgrades.go +++ b/app/e2e_include_upgrades.go @@ -2,12 +2,10 @@ package app -import ( - "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" -) +import v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" // init is used to include signet upgrade used for e2e testing // this file should be removed once the upgrade testing with signet ends. 
func init() { - Upgrades = append(Upgrades, signetlaunch.Upgrade) + Upgrades = append(Upgrades, v1.Upgrade) } diff --git a/app/keepers/keepers.go b/app/keepers/keepers.go index 17c9a522..0fb61c6b 100644 --- a/app/keepers/keepers.go +++ b/app/keepers/keepers.go @@ -3,7 +3,6 @@ package keepers import ( "path/filepath" - errorsmod "cosmossdk.io/errors" "cosmossdk.io/log" storetypes "cosmossdk.io/store/types" circuitkeeper "cosmossdk.io/x/circuit/keeper" @@ -21,7 +20,6 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/runtime" servertypes "github.com/cosmos/cosmos-sdk/server/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec" authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" @@ -80,9 +78,6 @@ import ( incentivetypes "github.com/babylonlabs-io/babylon/x/incentive/types" monitorkeeper "github.com/babylonlabs-io/babylon/x/monitor/keeper" monitortypes "github.com/babylonlabs-io/babylon/x/monitor/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge" - zckeeper "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" ) // Capabilities of the IBC wasm contracts @@ -129,11 +124,10 @@ type AppKeepers struct { MonitorKeeper monitorkeeper.Keeper // IBC-related modules - IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - IBCFeeKeeper ibcfeekeeper.Keeper // for relayer incentivization - https://github.com/cosmos/ibc/tree/main/spec/app/ics-029-fee-payment - TransferKeeper ibctransferkeeper.Keeper // for cross-chain fungible token transfers - IBCWasmKeeper ibcwasmkeeper.Keeper // for IBC wasm light clients - ZoneConciergeKeeper zckeeper.Keeper // for cross-chain fungible token transfers + IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly + IBCFeeKeeper ibcfeekeeper.Keeper // for relayer incentivization - https://github.com/cosmos/ibc/tree/main/spec/app/ics-029-fee-payment + TransferKeeper ibctransferkeeper.Keeper // for cross-chain fungible token transfers + IBCWasmKeeper ibcwasmkeeper.Keeper // for IBC wasm light clients // BTC staking related modules BTCStakingKeeper btcstakingkeeper.Keeper @@ -198,7 +192,6 @@ func (ak *AppKeepers) InitKeepers( ibctransfertypes.StoreKey, ibcfeetypes.StoreKey, ibcwasmtypes.StoreKey, - zctypes.StoreKey, // BTC staking related modules btcstakingtypes.StoreKey, finalitytypes.StoreKey, @@ -291,7 +284,6 @@ func (ak *AppKeepers) InitKeepers( // grant capabilities for the ibc and ibc-transfer modules scopedIBCKeeper := ak.CapabilityKeeper.ScopeToModule(ibcexported.ModuleName) scopedTransferKeeper := ak.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) - scopedZoneConciergeKeeper := ak.CapabilityKeeper.ScopeToModule(zctypes.ModuleName) scopedWasmKeeper := ak.CapabilityKeeper.ScopeToModule(wasmtypes.ModuleName) // Applications that wish to enforce statically created ScopedKeepers should call `Seal` after creating @@ -454,12 +446,6 @@ func (ak *AppKeepers) InitKeepers( authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) - // create querier for KVStore - storeQuerier, ok := bApp.CommitMultiStore().(storetypes.Queryable) - if !ok { - panic(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multistore doesn't support queries")) - } - ak.IBCFeeKeeper = ibcfeekeeper.NewKeeper( appCodec, keys[ibcfeetypes.StoreKey], 
ak.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware @@ -467,25 +453,6 @@ func (ak *AppKeepers) InitKeepers( ak.IBCKeeper.PortKeeper, ak.AccountKeeper, ak.BankKeeper, ) - zcKeeper := zckeeper.NewKeeper( - appCodec, - runtime.NewKVStoreService(keys[zctypes.StoreKey]), - ak.IBCFeeKeeper, - ak.IBCKeeper.ClientKeeper, - ak.IBCKeeper.ChannelKeeper, - ak.IBCKeeper.PortKeeper, - ak.AccountKeeper, - ak.BankKeeper, - &btclightclientKeeper, - &checkpointingKeeper, - &btcCheckpointKeeper, - epochingKeeper, - storeQuerier, - scopedZoneConciergeKeeper, - authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - ak.ZoneConciergeKeeper = *zcKeeper - // Create Transfer Keepers ak.TransferKeeper = ibctransferkeeper.NewKeeper( appCodec, @@ -510,12 +477,12 @@ func (ak *AppKeepers) InitKeepers( epochingKeeper.SetMsgServiceRouter(bApp.MsgServiceRouter()) // make ZoneConcierge and Monitor to subscribe to the epoching's hooks ak.EpochingKeeper = *epochingKeeper.SetHooks( - epochingtypes.NewMultiEpochingHooks(ak.ZoneConciergeKeeper.Hooks(), ak.MonitorKeeper.Hooks()), + epochingtypes.NewMultiEpochingHooks(ak.MonitorKeeper.Hooks()), ) // set up Checkpointing, BTCCheckpoint, and BTCLightclient keepers ak.CheckpointingKeeper = *checkpointingKeeper.SetHooks( - checkpointingtypes.NewMultiCheckpointingHooks(ak.EpochingKeeper.Hooks(), ak.ZoneConciergeKeeper.Hooks(), ak.MonitorKeeper.Hooks()), + checkpointingtypes.NewMultiCheckpointingHooks(ak.EpochingKeeper.Hooks(), ak.MonitorKeeper.Hooks()), ) ak.BtcCheckpointKeeper = btcCheckpointKeeper ak.BTCLightClientKeeper = *btclightclientKeeper.SetHooks( @@ -562,7 +529,7 @@ func (ak *AppKeepers) InitKeepers( // If evidence needs to be handled for the app, set routes in router here and seal ak.EvidenceKeeper = *evidenceKeeper - wasmOpts = append(owasm.RegisterCustomPlugins(&ak.EpochingKeeper, &ak.ZoneConciergeKeeper, &ak.BTCLightClientKeeper), wasmOpts...) + wasmOpts = append(owasm.RegisterCustomPlugins(&ak.EpochingKeeper, &ak.CheckpointingKeeper, &ak.BTCLightClientKeeper), wasmOpts...) ak.WasmKeeper = wasmkeeper.NewKeeper( appCodec, @@ -609,10 +576,6 @@ func (ak *AppKeepers) InitKeepers( transferStack = transfer.NewIBCModule(ak.TransferKeeper) transferStack = ibcfee.NewIBCMiddleware(transferStack, ak.IBCFeeKeeper) - var zoneConciergeStack porttypes.IBCModule - zoneConciergeStack = zoneconcierge.NewIBCModule(ak.ZoneConciergeKeeper) - zoneConciergeStack = ibcfee.NewIBCMiddleware(zoneConciergeStack, ak.IBCFeeKeeper) - var wasmStack porttypes.IBCModule wasmStack = wasm.NewIBCHandler(ak.WasmKeeper, ak.IBCKeeper.ChannelKeeper, ak.IBCFeeKeeper) wasmStack = ibcfee.NewIBCMiddleware(wasmStack, ak.IBCFeeKeeper) @@ -620,7 +583,6 @@ func (ak *AppKeepers) InitKeepers( // Create static IBC router, add ibc-transfer module route, then set and seal it ibcRouter := porttypes.NewRouter(). AddRoute(ibctransfertypes.ModuleName, transferStack). - AddRoute(zctypes.ModuleName, zoneConciergeStack). 
AddRoute(wasmtypes.ModuleName, wasmStack) // Setting Router will finalize all routes by sealing router @@ -637,7 +599,6 @@ func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino // whole usage of params module paramsKeeper.Subspace(ibcexported.ModuleName) paramsKeeper.Subspace(ibctransfertypes.ModuleName) - paramsKeeper.Subspace(zctypes.ModuleName) return paramsKeeper } diff --git a/app/params/config.go b/app/params/config.go index 3d432267..222752e9 100644 --- a/app/params/config.go +++ b/app/params/config.go @@ -20,6 +20,18 @@ const ( Bech32PrefixAccAddr = "bbn" ) +// taken from https://github.com/celestiaorg/celestia-app/pull/2985 +const ( + // DefaultMinGasPrice is the default min gas price that gets set in the app.toml file. + // The min gas price acts as a filter. Transactions below that limit will not pass + // a node's `CheckTx` and thus not be proposed by that node. + DefaultMinGasPrice = 0.002 + + // GlobalMinGasPrice is used in the AnteHandler to ensure + // that all transactions have a gas price greater than or equal to this value. + GlobalMinGasPrice = DefaultMinGasPrice +) + var ( // Bech32PrefixAccPub defines the Bech32 prefix of an account's public key. Bech32PrefixAccPub = Bech32PrefixAccAddr + "pub" diff --git a/app/upgrades/signetlaunch/README.md b/app/upgrades/v1/README.md similarity index 74% rename from app/upgrades/signetlaunch/README.md rename to app/upgrades/v1/README.md index 9ac84d3e..ac7fe8bc 100644 --- a/app/upgrades/signetlaunch/README.md +++ b/app/upgrades/v1/README.md @@ -5,9 +5,13 @@ DO NOT USE IN PRODUCTION! ## Compile signet launch upgrade -This upgrade loads 2 JSONs from strings in different files. -BTC Headers from `./data_btc_headers.go` and signed messages -to create finality providers `./data_signed_fps.go`. +This upgrade loads 5 JSONs from strings in different files. + +- BTC Headers at `./data_btc_headers.go` +- Finality Providers signed messages at `./data_signed_fps.go` +- Tokens distribution at `./data_token_distribution.go` +- BTC Staking Parameters at `./btcstaking_params.go` +- Finality Parameters at `./finality_params.go` ### BTC Headers @@ -55,5 +59,16 @@ Phase-1 will need to provider a signed [MsgCreateFinalityProvider](../../../x/btcstaking/types/tx.pb.go#38) as a json file message inside the networks repository registry. - +### Tokens distribution + +During the upgrade, some tokens will be distributed so users and operators can +finish their actions, for example: + +- BTC stakers to finalize their BTC delegation +- Finality providers to submit pub rand and finality +- New Cosmos-SDK validators to decentralize after the upgrade +- Vigilantes +- Covenant Emulators + +> This data for token distribution will be built according to the +data collected during Phase-1. diff --git a/app/upgrades/signetlaunch/btcstaking_params.go b/app/upgrades/v1/btcstaking_params.go similarity index 98% rename from app/upgrades/signetlaunch/btcstaking_params.go rename to app/upgrades/v1/btcstaking_params.go index 649e18bd..79554d7f 100644 --- a/app/upgrades/signetlaunch/btcstaking_params.go +++ b/app/upgrades/v1/btcstaking_params.go @@ -1,4 +1,4 @@ -package signetlaunch +package v1 // TODO Some default parameters. Consider how to switch those depending on network: // mainnet, testnet, devnet etc.
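To make the new fee rule concrete, here is a small, self-contained sketch of the arithmetic performed by `CheckTxFeeWithGlobalMinGasPrices` and `getTxPriority` with `GlobalMinGasPrice = 0.002`. It is illustrative only: it uses plain Go integers and floats instead of the SDK's `LegacyDec`/`Int` types, so treat it as an assumption-laden approximation rather than the production implementation.

```go
package main

import (
	"fmt"
	"math"
)

// globalMinGasPrice mirrors appparams.GlobalMinGasPrice (0.002 ubbn per gas unit).
const globalMinGasPrice = 0.002

// minRequiredFee approximates minFee = round(gasLimit * globalMinGasPrice),
// a float sketch of LegacyDec.MulInt(...).RoundInt() in the ante handler.
func minRequiredFee(gasLimit uint64) int64 {
	return int64(math.Round(float64(gasLimit) * globalMinGasPrice))
}

// approxPriority mirrors getTxPriority for a single-coin fee:
// priority = feeAmount * 1_000_000 / gas, with truncating integer division.
// The real code uses arbitrary-precision integers, so overflow is not a concern there.
func approxPriority(feeAmount, gas int64) int64 {
	return feeAmount * 1_000_000 / gas
}

func main() {
	// A tx with gas limit 400 needs round(0.8) = 1 ubbn, matching the
	// "minFee = 0.8, rounds up to 1" case in fee_checker_test.go.
	fmt.Println(minRequiredFee(400)) // 1

	// A 2 ubbn fee with gas 1 gives priority 2_000_000, and a 1000 ubbn fee
	// with gas 1_000_000 gives priority 1000, matching get_tx_priority_test.go.
	fmt.Println(approxPriority(2, 1))             // 2000000
	fmt.Println(approxPriority(1_000, 1_000_000)) // 1000
}
```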
diff --git a/app/upgrades/signetlaunch/btcstaking_params_test.go b/app/upgrades/v1/btcstaking_params_test.go similarity index 78% rename from app/upgrades/signetlaunch/btcstaking_params_test.go rename to app/upgrades/v1/btcstaking_params_test.go index 1df82afe..66f88b80 100644 --- a/app/upgrades/signetlaunch/btcstaking_params_test.go +++ b/app/upgrades/v1/btcstaking_params_test.go @@ -1,4 +1,4 @@ -package signetlaunch_test +package v1_test import ( "testing" @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" ) func TestHardCodedBtcStakingParamsAreValid(t *testing.T) { diff --git a/app/upgrades/signetlaunch/data_btc_headers.go b/app/upgrades/v1/data_btc_headers.go similarity index 99% rename from app/upgrades/signetlaunch/data_btc_headers.go rename to app/upgrades/v1/data_btc_headers.go index 408de35f..843ebad6 100644 --- a/app/upgrades/signetlaunch/data_btc_headers.go +++ b/app/upgrades/v1/data_btc_headers.go @@ -1,4 +1,4 @@ -package signetlaunch +package v1 const NewBtcHeadersStr = `{ "btc_headers": [ diff --git a/app/upgrades/signetlaunch/data_signed_fps.go b/app/upgrades/v1/data_signed_fps.go similarity index 99% rename from app/upgrades/signetlaunch/data_signed_fps.go rename to app/upgrades/v1/data_signed_fps.go index 38c531ae..40d2af7f 100644 --- a/app/upgrades/signetlaunch/data_signed_fps.go +++ b/app/upgrades/v1/data_signed_fps.go @@ -1,4 +1,4 @@ -package signetlaunch +package v1 const SignedFPsStr = `{ "signed_txs_create_fp": [ diff --git a/app/upgrades/signetlaunch/data_signed_fps_test.go b/app/upgrades/v1/data_signed_fps_test.go similarity index 97% rename from app/upgrades/signetlaunch/data_signed_fps_test.go rename to app/upgrades/v1/data_signed_fps_test.go index 338f863c..17531815 100644 --- a/app/upgrades/signetlaunch/data_signed_fps_test.go +++ b/app/upgrades/v1/data_signed_fps_test.go @@ -1,4 +1,4 @@ -package signetlaunch_test +package v1_test import ( "bytes" @@ -7,7 +7,7 @@ import ( "time" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" btcstktypes "github.com/babylonlabs-io/babylon/x/btcstaking/types" tmproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cosmos/cosmos-sdk/codec" diff --git a/app/upgrades/v1/data_token_distribution.go b/app/upgrades/v1/data_token_distribution.go new file mode 100644 index 00000000..d8dfa7a3 --- /dev/null +++ b/app/upgrades/v1/data_token_distribution.go @@ -0,0 +1,26 @@ +package v1 + +const TokensDistribution = `{ + "token_distribution": [ + { + "address_sender": "bbn14d97wthm9fqvvdd96ax8lnfppwknndxztevs7k", + "address_receiver": "bbn13t5cnqj6t0p4xa40cwhmgv4wju0zl6g8slk8rz", + "amount": 100000 + }, + { + "address_sender": "bbn10d07y265gmmuvt4z0w9aw880jnsr700jduz5f2", + "address_receiver": "bbn1yl6hdjhmkf37639730gffanpzndzdpmhep8cg6", + "amount": 1500000 + }, + { + "address_sender": "bbn10d07y265gmmuvt4z0w9aw880jnsr700jduz5f2", + "address_receiver": "bbn1k6u5pge8w6lavtmunp02smehr4qtazkw8clg04", + "amount": 700000 + }, + { + "address_sender": "bbn10d07y265gmmuvt4z0w9aw880jnsr700jduz5f2", + "address_receiver": "bbn1dj2c57fjv6md7pzykh9y6h407ln6xxcw090hre", + "amount": 100000 + } + ] +}` diff --git a/app/upgrades/v1/data_token_distribution_test.go b/app/upgrades/v1/data_token_distribution_test.go new file mode 100644 index 
00000000..372d64d6 --- /dev/null +++ b/app/upgrades/v1/data_token_distribution_test.go @@ -0,0 +1,27 @@ +package v1_test + +import ( + "testing" + + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +func TestCheckTokensDistributionFromData(t *testing.T) { + d, err := v1.LoadTokenDistributionFromData() + require.NoError(t, err) + require.Greater(t, len(d.TokenDistribution), 1) + + for _, td := range d.TokenDistribution { + sender, err := sdk.AccAddressFromBech32(td.AddressSender) + require.NoError(t, err) + require.Equal(t, sender.String(), td.AddressSender) + + receiver, err := sdk.AccAddressFromBech32(td.AddressReceiver) + require.NoError(t, err) + require.Equal(t, receiver.String(), td.AddressReceiver) + + require.True(t, td.Amount > 0) + } +} diff --git a/app/upgrades/signetlaunch/finality_params.go b/app/upgrades/v1/finality_params.go similarity index 93% rename from app/upgrades/signetlaunch/finality_params.go rename to app/upgrades/v1/finality_params.go index 08b75dcd..bfd9be95 100644 --- a/app/upgrades/signetlaunch/finality_params.go +++ b/app/upgrades/v1/finality_params.go @@ -1,4 +1,4 @@ -package signetlaunch +package v1 // TODO Some default parameters. Consider how to switch those depending on network: // mainnet, testnet, devnet etc. diff --git a/app/upgrades/signetlaunch/finality_params_test.go b/app/upgrades/v1/finality_params_test.go similarity index 78% rename from app/upgrades/signetlaunch/finality_params_test.go rename to app/upgrades/v1/finality_params_test.go index d6ba29a3..6528bbec 100644 --- a/app/upgrades/signetlaunch/finality_params_test.go +++ b/app/upgrades/v1/finality_params_test.go @@ -1,4 +1,4 @@ -package signetlaunch_test +package v1_test import ( "testing" @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" ) func TestHardCodedFinalityParamsAreValid(t *testing.T) { diff --git a/app/upgrades/signetlaunch/upgrades.go b/app/upgrades/v1/upgrades.go similarity index 71% rename from app/upgrades/signetlaunch/upgrades.go rename to app/upgrades/v1/upgrades.go index e9d48cfb..13fad658 100644 --- a/app/upgrades/signetlaunch/upgrades.go +++ b/app/upgrades/v1/upgrades.go @@ -1,7 +1,7 @@ // This code is only for testing purposes. // DO NOT USE IN PRODUCTION! 
-package signetlaunch +package v1 import ( "bytes" @@ -11,12 +11,14 @@ import ( "fmt" "sort" + sdkmath "cosmossdk.io/math" store "cosmossdk.io/store/types" upgradetypes "cosmossdk.io/x/upgrade/types" "github.com/btcsuite/btcd/chaincfg" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" "github.com/babylonlabs-io/babylon/app/keepers" appparams "github.com/babylonlabs-io/babylon/app/params" @@ -30,16 +32,31 @@ import ( finalitytypes "github.com/babylonlabs-io/babylon/x/finality/types" ) +const ( + ZoneConciergeStoreKey = "zoneconcierge" +) + var Upgrade = upgrades.Upgrade{ - UpgradeName: "signet-launch", + UpgradeName: "v1", CreateUpgradeHandler: CreateUpgradeHandler, - StoreUpgrades: store.StoreUpgrades{}, + // Upgrade necessary for deletions of `zoneconcierge` + StoreUpgrades: store.StoreUpgrades{ + Deleted: []string{ZoneConciergeStoreKey}, + }, } type DataSignedFps struct { SignedTxsFP []any `json:"signed_txs_create_fp"` } +type DataTokenDistribution struct { + TokenDistribution []struct { + AddressSender string `json:"address_sender"` + AddressReceiver string `json:"address_receiver"` + Amount int64 `json:"amount"` + } `json:"token_distribution"` +} + // CreateUpgradeHandler upgrade handler for launch. func CreateUpgradeHandler( mm *module.Manager, @@ -63,7 +80,7 @@ func CreateUpgradeHandler( panic(err) } - if err := propLaunch(ctx, keepers.EncCfg, &keepers.BTCLightClientKeeper, &keepers.BTCStakingKeeper); err != nil { + if err := upgradeLaunch(ctx, keepers.EncCfg, &keepers.BTCLightClientKeeper, &keepers.BTCStakingKeeper, keepers.BankKeeper); err != nil { panic(err) } @@ -71,30 +88,6 @@ func CreateUpgradeHandler( } } -func LoadBtcStakingParamsFromData(cdc codec.Codec) (btcstktypes.Params, error) { - buff := bytes.NewBufferString(BtcStakingParamStr) - - var params btcstktypes.Params - err := cdc.UnmarshalJSON(buff.Bytes(), ¶ms) - if err != nil { - return btcstktypes.Params{}, err - } - - return params, nil -} - -func LoadFinalityParamsFromData(cdc codec.Codec) (finalitytypes.Params, error) { - buff := bytes.NewBufferString(FinalityParamStr) - - var params finalitytypes.Params - err := cdc.UnmarshalJSON(buff.Bytes(), ¶ms) - if err != nil { - return finalitytypes.Params{}, err - } - - return params, nil -} - func upgradeBtcStakingParameters( ctx sdk.Context, e *appparams.EncodingConfig, @@ -131,25 +124,65 @@ func upgradeFinalityParameters( return k.SetParams(ctx, params) } -// propLaunch runs the proposal of launch that is meant to insert new BTC Headers. 
-func propLaunch( +// upgradeLaunch runs the upgrade: +// - Transfer ubbn funds for token distribution +// - Insert new BTC Headers +// - Insert new finality providers +func upgradeLaunch( ctx sdk.Context, encCfg *appparams.EncodingConfig, btcLigthK *btclightkeeper.Keeper, btcStkK *btcstkkeeper.Keeper, + bankK bankkeeper.SendKeeper, ) error { - cdc := encCfg.Codec + if err := upgradeTokensDistribution(ctx, bankK); err != nil { + return err + } - newHeaders, err := LoadBTCHeadersFromData(cdc) + if err := upgradeBTCHeaders(ctx, encCfg.Codec, btcLigthK); err != nil { + return err + } + + return upgradeSignedFPs(ctx, encCfg, btcStkK) +} + +func upgradeTokensDistribution(ctx sdk.Context, bankK bankkeeper.SendKeeper) error { + data, err := LoadTokenDistributionFromData() if err != nil { return err } - if err := insertBtcHeaders(ctx, btcLigthK, newHeaders); err != nil { + for _, td := range data.TokenDistribution { + receiver, err := sdk.AccAddressFromBech32(td.AddressReceiver) + if err != nil { + return err + } + + sender, err := sdk.AccAddressFromBech32(td.AddressSender) + if err != nil { + return err + } + + amount := sdk.NewCoin(appparams.BaseCoinUnit, sdkmath.NewInt(td.Amount)) + if err := bankK.SendCoins(ctx, sender, receiver, sdk.NewCoins(amount)); err != nil { + return err + } + } + + return nil +} + +func upgradeBTCHeaders(ctx sdk.Context, cdc codec.Codec, btcLigthK *btclightkeeper.Keeper) error { + newHeaders, err := LoadBTCHeadersFromData(cdc) + if err != nil { return err } - fps, err := LoadSignedFPsFromData(cdc, encCfg.TxConfig.TxJSONDecoder()) + return insertBtcHeaders(ctx, btcLigthK, newHeaders) +} + +func upgradeSignedFPs(ctx sdk.Context, encCfg *appparams.EncodingConfig, btcStkK *btcstkkeeper.Keeper) error { + fps, err := LoadSignedFPsFromData(encCfg.Codec, encCfg.TxConfig.TxJSONDecoder()) if err != nil { return err } @@ -157,6 +190,30 @@ func propLaunch( return insertFPs(ctx, btcStkK, fps) } +func LoadBtcStakingParamsFromData(cdc codec.Codec) (btcstktypes.Params, error) { + buff := bytes.NewBufferString(BtcStakingParamStr) + + var params btcstktypes.Params + err := cdc.UnmarshalJSON(buff.Bytes(), ¶ms) + if err != nil { + return btcstktypes.Params{}, err + } + + return params, nil +} + +func LoadFinalityParamsFromData(cdc codec.Codec) (finalitytypes.Params, error) { + buff := bytes.NewBufferString(FinalityParamStr) + + var params finalitytypes.Params + err := cdc.UnmarshalJSON(buff.Bytes(), ¶ms) + if err != nil { + return finalitytypes.Params{}, err + } + + return params, nil +} + // LoadBTCHeadersFromData returns the BTC headers load from the json string with the headers inside of it. func LoadBTCHeadersFromData(cdc codec.Codec) ([]*btclighttypes.BTCHeaderInfo, error) { buff := bytes.NewBufferString(NewBtcHeadersStr) @@ -208,6 +265,19 @@ func LoadSignedFPsFromData(cdc codec.Codec, txJSONDecoder sdk.TxDecoder) ([]*btc return fps, nil } +// LoadTokenDistributionFromData returns the tokens to be distributed from the json string. 
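(The `LoadTokenDistributionFromData` helper documented by the comment above follows immediately after this aside.) As a hedged illustration of the JSON shape it parses and of how `upgradeTokensDistribution` consumes the result, here is a self-contained sketch; the addresses and amounts are placeholders for demonstration, not the real distribution data.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// DataTokenDistribution mirrors the struct declared in upgrades.go.
type DataTokenDistribution struct {
	TokenDistribution []struct {
		AddressSender   string `json:"address_sender"`
		AddressReceiver string `json:"address_receiver"`
		Amount          int64  `json:"amount"`
	} `json:"token_distribution"`
}

// sampleDistribution has the same shape as TokensDistribution, with placeholder values.
const sampleDistribution = `{
  "token_distribution": [
    {"address_sender": "bbn1sender...", "address_receiver": "bbn1receiver...", "amount": 100000}
  ]
}`

func main() {
	var d DataTokenDistribution
	if err := json.Unmarshal([]byte(sampleDistribution), &d); err != nil {
		panic(err)
	}
	// In the upgrade handler, each entry results in a bank SendCoins call
	// transferring `amount` ubbn from the sender to the receiver.
	for _, td := range d.TokenDistribution {
		fmt.Printf("%s -> %s: %d ubbn\n", td.AddressSender, td.AddressReceiver, td.Amount)
	}
}
```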
+func LoadTokenDistributionFromData() (DataTokenDistribution, error) { + buff := bytes.NewBufferString(TokensDistribution) + + var d DataTokenDistribution + err := json.Unmarshal(buff.Bytes(), &d) + if err != nil { + return d, err + } + + return d, nil +} + func parseCreateFPFromSignedTx(cdc codec.Codec, tx sdk.Tx) (*btcstktypes.MsgCreateFinalityProvider, error) { msgs := tx.GetMsgs() if len(msgs) != 1 { diff --git a/app/upgrades/signetlaunch/upgrades_test.go b/app/upgrades/v1/upgrades_test.go similarity index 71% rename from app/upgrades/signetlaunch/upgrades_test.go rename to app/upgrades/v1/upgrades_test.go index 11245bcc..691160cd 100644 --- a/app/upgrades/signetlaunch/upgrades_test.go +++ b/app/upgrades/v1/upgrades_test.go @@ -1,4 +1,4 @@ -package signetlaunch_test +package v1_test import ( "fmt" @@ -7,14 +7,18 @@ import ( "cosmossdk.io/core/appmodule" "cosmossdk.io/core/header" + sdkmath "cosmossdk.io/math" "cosmossdk.io/x/upgrade" upgradetypes "cosmossdk.io/x/upgrade/types" + appparams "github.com/babylonlabs-io/babylon/app/params" + "github.com/babylonlabs-io/babylon/test/e2e/util" tmproto "github.com/cometbft/cometbft/proto/tendermint/types" sdk "github.com/cosmos/cosmos-sdk/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" "github.com/stretchr/testify/suite" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" "github.com/babylonlabs-io/babylon/x/btclightclient" btclighttypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" "github.com/babylonlabs-io/babylon/x/btcstaking/types" @@ -59,6 +63,16 @@ func (s *UpgradeTestSuite) TestUpgrade() { oldHeadersLen := 0 oldFPsLen := 0 + tokenDistData, err := v1.LoadTokenDistributionFromData() + s.NoError(err) + + balanceDiffByAddr := make(map[string]int64) + for _, td := range tokenDistData.TokenDistribution { + balanceDiffByAddr[td.AddressSender] -= td.Amount + balanceDiffByAddr[td.AddressReceiver] += td.Amount + } + balancesBeforeUpgrade := make(map[string]sdk.Coin) + testCases := []struct { msg string pre_update func() @@ -84,6 +98,25 @@ func (s *UpgradeTestSuite) TestUpgrade() { s.NoError(err) fModuleParams := s.app.FinalityKeeper.GetParams(s.ctx) s.NotEqualValues(fModuleParams, fParamsFromUpgrade) + + for addr, amountDiff := range balanceDiffByAddr { + sdkAddr := sdk.MustAccAddressFromBech32(addr) + + if amountDiff < 0 { + // if the amount is lower than zero, it means the addr is going to spend tokens and + // could be that the addr does not have enough funds. + // For test completeness, mint the coins that the acc is going to spend. 
+ coinsToMint := sdk.NewCoins(sdk.NewCoin(appparams.DefaultBondDenom, sdkmath.NewInt(util.Abs(amountDiff)))) + err = s.app.BankKeeper.MintCoins(s.ctx, minttypes.ModuleName, coinsToMint) + s.NoError(err) + + err = s.app.BankKeeper.SendCoinsFromModuleToAccount(s.ctx, minttypes.ModuleName, sdkAddr, coinsToMint) + s.NoError(err) + } + + // update the balances before upgrade only after mint check is done + balancesBeforeUpgrade[addr] = s.app.BankKeeper.GetBalance(s.ctx, sdkAddr, appparams.DefaultBondDenom) + } }, func() { // inject upgrade plan @@ -149,6 +182,19 @@ func (s *UpgradeTestSuite) TestUpgrade() { s.NoError(err) fModuleParams := s.app.FinalityKeeper.GetParams(s.ctx) s.EqualValues(fModuleParams, fParamsFromUpgrade) + + // verifies that all the modified balances match as expected after the upgrade + for addr, diff := range balanceDiffByAddr { + coinDiff := sdk.NewCoin(appparams.DefaultBondDenom, sdkmath.NewInt(util.Abs(diff))) + expectedBalance := balancesBeforeUpgrade[addr].Add(coinDiff) + if diff < 0 { + expectedBalance = balancesBeforeUpgrade[addr].Sub(coinDiff) + } + + sdkAddr := sdk.MustAccAddressFromBech32(addr) + balanceAfterUpgrade := s.app.BankKeeper.GetBalance(s.ctx, sdkAddr, appparams.DefaultBondDenom) + s.Equal(expectedBalance.String(), balanceAfterUpgrade.String()) + } }, }, } diff --git a/client/query/zoneconcierge.go b/client/query/zoneconcierge.go deleted file mode 100644 index bdc20c19..00000000 --- a/client/query/zoneconcierge.go +++ /dev/null @@ -1,96 +0,0 @@ -package query - -import ( - "context" - - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/cosmos/cosmos-sdk/client" - sdkquerytypes "github.com/cosmos/cosmos-sdk/types/query" -) - -// QueryZoneConcierge queries the ZoneConcierge module of the Babylon node -// according to the given function -func (c *QueryClient) QueryZoneConcierge(f func(ctx context.Context, queryClient zctypes.QueryClient) error) error { - ctx, cancel := c.getQueryContext() - defer cancel() - - clientCtx := client.Context{Client: c.RPCClient} - queryClient := zctypes.NewQueryClient(clientCtx) - - return f(ctx, queryClient) -} - -// FinalizedConnectedChainsInfo queries the zoneconcierge module to get the finalization information for a connected chain -func (c *QueryClient) FinalizedConnectedChainsInfo(consumerIds []string) (*zctypes.QueryFinalizedChainsInfoResponse, error) { - var resp *zctypes.QueryFinalizedChainsInfoResponse - err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error { - var err error - req := &zctypes.QueryFinalizedChainsInfoRequest{ - ConsumerIds: consumerIds, - } - resp, err = queryClient.FinalizedChainsInfo(ctx, req) - return err - }) - - return resp, err -} - -// ConnectedChainsInfo queries the zoneconcierge module to get information for a connected chain -func (c *QueryClient) ConnectedChainsInfo(consumerIds []string) (*zctypes.QueryChainsInfoResponse, error) { - var resp *zctypes.QueryChainsInfoResponse - err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error { - var err error - req := &zctypes.QueryChainsInfoRequest{ - ConsumerIds: consumerIds, - } - resp, err = queryClient.ChainsInfo(ctx, req) - return err - }) - - return resp, err -} - -// ConnectedChainList queries the zoneconierge module for the chain IDs of the connected chains -func (c *QueryClient) ConnectedChainList() (*zctypes.QueryChainListResponse, error) { - var resp *zctypes.QueryChainListResponse - err := c.QueryZoneConcierge(func(ctx context.Context, 
queryClient zctypes.QueryClient) error { - var err error - req := &zctypes.QueryChainListRequest{} - resp, err = queryClient.ChainList(ctx, req) - return err - }) - - return resp, err -} - -// ConnectedChainHeaders queries the zoneconcierge module for the headers of a connected chain -func (c *QueryClient) ConnectedChainHeaders(consumerID string, pagination *sdkquerytypes.PageRequest) (*zctypes.QueryListHeadersResponse, error) { - var resp *zctypes.QueryListHeadersResponse - err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error { - var err error - req := &zctypes.QueryListHeadersRequest{ - ConsumerId: consumerID, - Pagination: pagination, - } - resp, err = queryClient.ListHeaders(ctx, req) - return err - }) - - return resp, err -} - -// ConnectedChainsEpochInfo queries the zoneconcierge module for the chain information of a connected chain at a particular epoch -func (c *QueryClient) ConnectedChainsEpochInfo(consumerIds []string, epochNum uint64) (*zctypes.QueryEpochChainsInfoResponse, error) { - var resp *zctypes.QueryEpochChainsInfoResponse - err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error { - var err error - req := &zctypes.QueryEpochChainsInfoRequest{ - ConsumerIds: consumerIds, - EpochNum: epochNum, - } - resp, err = queryClient.EpochChainsInfo(ctx, req) - return err - }) - - return resp, err -} diff --git a/cmd/babylond/cmd/custom_babylon_config.go b/cmd/babylond/cmd/custom_babylon_config.go index 6b4e6c9f..6e4c4dfc 100644 --- a/cmd/babylond/cmd/custom_babylon_config.go +++ b/cmd/babylond/cmd/custom_babylon_config.go @@ -1,11 +1,11 @@ package cmd import ( + "fmt" + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" - sdkmath "cosmossdk.io/math" serverconfig "github.com/cosmos/cosmos-sdk/server/config" - sdk "github.com/cosmos/cosmos-sdk/types" appparams "github.com/babylonlabs-io/babylon/app/params" bbn "github.com/babylonlabs-io/babylon/types" @@ -31,19 +31,9 @@ type BabylonAppConfig struct { func DefaultBabylonAppConfig() *BabylonAppConfig { baseConfig := *serverconfig.DefaultConfig() - // The SDK's default minimum gas price is set to "" (empty value) inside - // app.toml. If left empty by validators, the node will halt on startup. - // However, the chain developer can set a default app.toml value for their - // validators here. - // - // In summary: - // - if you leave srvCfg.MinGasPrices = "", all validators MUST tweak their - // own app.toml config, - // - if you set srvCfg.MinGasPrices non-empty, validators CAN tweak their - // own app.toml to override, or use this default value. - // - // In app, we set the min gas prices to 0. - baseConfig.MinGasPrices = sdk.NewCoin(appparams.BaseCoinUnit, sdkmath.NewInt(1)).String() + // The SDK's default minimum gas price is set to "0.002ubbn" (empty value) inside + // app.toml, in order to avoid spamming attacks due to transactions with 0 gas price. 
+ baseConfig.MinGasPrices = fmt.Sprintf("%f%s", appparams.GlobalMinGasPrice, appparams.BaseCoinUnit) return &BabylonAppConfig{ Config: baseConfig, Wasm: wasmtypes.DefaultWasmConfig(), diff --git a/contrib/images/Makefile b/contrib/images/Makefile index e708e22a..ae1368aa 100644 --- a/contrib/images/Makefile +++ b/contrib/images/Makefile @@ -4,7 +4,7 @@ BABYLON_VERSION_BEFORE_UPGRADE ?= v0.9.3 all: babylond cosmos-relayer -babylond: +babylond: babylond-rmi docker build --tag babylonlabs-io/babylond -f babylond/Dockerfile ${BABYLON_FULL_PATH} babylond-e2e: @@ -12,7 +12,7 @@ babylond-e2e: --build-arg BUILD_TAGS="e2e" ## TODO: once release docker public versions for tags, remove this! -babylond-before-upgrade: +babylond-before-upgrade: babylond-rmi-upgrade docker build --tag babylonlabs-io/babylond-before-upgrade -f babylond/Dockerfile \ --build-arg VERSION="${BABYLON_VERSION_BEFORE_UPGRADE}" ${BABYLON_FULL_PATH} @@ -25,13 +25,10 @@ babylond-rmi-upgrade: e2e-init-chain-rmi: docker rmi babylonlabs-io/babylond-e2e-init-chain --force 2>/dev/null; true -e2e-init-chain: +e2e-init-chain: e2e-init-chain-rmi @DOCKER_BUILDKIT=1 docker build -t babylonlabs-io/babylond-e2e-init-chain --build-arg E2E_SCRIPT_NAME=chain --platform=linux/x86_64 \ -f e2e-initialization/init.Dockerfile --build-arg VERSION="${BABYLON_VERSION_BEFORE_UPGRADE}" ${BABYLON_FULL_PATH} -e2e-init-chain-rmi: - docker rmi babylonlabs-io/babylond-e2e-init-chain 2>/dev/null; true - cosmos-relayer: cosmos-relayer-rmi docker build --tag babylonlabs-io/cosmos-relayer:${RELAYER_TAG} -f cosmos-relayer/Dockerfile \ ${BABYLON_FULL_PATH}/contrib/images/cosmos-relayer diff --git a/contrib/images/babylond/Dockerfile b/contrib/images/babylond/Dockerfile index 7aa4602a..68a8f69d 100644 --- a/contrib/images/babylond/Dockerfile +++ b/contrib/images/babylond/Dockerfile @@ -26,7 +26,10 @@ RUN LEDGER_ENABLED=$LEDGER_ENABLED \ FROM debian:bookworm-slim AS wasm-link -RUN apt-get update && apt-get install -y wget bash jq +# Create a user +RUN addgroup --gid 1137 --system babylon && adduser --uid 1137 --gid 1137 --system --home /home/babylon babylon + +RUN apt-get update && apt-get install -y curl wget bash jq # Label should match your github repo LABEL org.opencontainers.image.source="https://github.com/babylonlabs-io/babylond:${VERSION}" @@ -43,3 +46,9 @@ RUN WASMVM_VERSION=$(grep github.com/CosmWasm/wasmvm /tmp/go.mod | cut -d' ' -f2 RUN rm -f /tmp/go.mod COPY --from=build-env /go/src/github.com/babylonlabs-io/babylon/build/babylond /bin/babylond + +# Set home directory and user +WORKDIR /home/babylon +RUN chown -R babylon /home/babylon +RUN chmod g+s /home/babylon +USER babylon \ No newline at end of file diff --git a/contrib/images/e2e-initialization/init.Dockerfile b/contrib/images/e2e-initialization/init.Dockerfile index 80c7ab97..2a496a1a 100644 --- a/contrib/images/e2e-initialization/init.Dockerfile +++ b/contrib/images/e2e-initialization/init.Dockerfile @@ -18,7 +18,10 @@ RUN LEDGER_ENABLED=false LINK_STATICALLY=false E2E_SCRIPT_NAME=${E2E_SCRIPT_NAME FROM debian:bookworm-slim AS wasm-link -RUN apt-get update && apt-get install -y wget bash + +# Create a user +RUN addgroup --gid 1137 --system babylon && adduser --uid 1137 --gid 1137 --system --home /home/babylon babylon +RUN apt-get update && apt-get install -y bash curl jq wget # Label should match your github repo LABEL org.opencontainers.image.source="https://github.com/babylonlabs-io/babylond:${VERSION}" diff --git a/proto/babylon/zoneconcierge/v1/genesis.proto 
b/proto/babylon/zoneconcierge/v1/genesis.proto deleted file mode 100644 index 8277e9e8..00000000 --- a/proto/babylon/zoneconcierge/v1/genesis.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - -import "gogoproto/gogo.proto"; -import "babylon/zoneconcierge/v1/params.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// GenesisState defines the zoneconcierge module's genesis state. -message GenesisState { - string port_id = 1; - Params params = 2 [ (gogoproto.nullable) = false ]; -} diff --git a/proto/babylon/zoneconcierge/v1/packet.proto b/proto/babylon/zoneconcierge/v1/packet.proto deleted file mode 100644 index 9576b951..00000000 --- a/proto/babylon/zoneconcierge/v1/packet.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - -import "babylon/btccheckpoint/v1/btccheckpoint.proto"; -import "babylon/checkpointing/v1/checkpoint.proto"; -import "babylon/btclightclient/v1/btclightclient.proto"; -import "babylon/epoching/v1/epoching.proto"; -import "babylon/zoneconcierge/v1/zoneconcierge.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// ZoneconciergePacketData is the message that defines the IBC packets of -// ZoneConcierge -message ZoneconciergePacketData { - // packet is the actual message carried in the IBC packet - oneof packet { - BTCTimestamp btc_timestamp = 1; - } -} - -// BTCTimestamp is a BTC timestamp that carries information of a BTC-finalised epoch -// It includes a number of BTC headers, a raw checkpoint, an epoch metadata, and -// a CZ header if there exists CZ headers checkpointed to this epoch. -// Upon a newly finalised epoch in Babylon, Babylon will send a BTC timestamp to each -// Cosmos zone that has phase-2 integration with Babylon via IBC. -message BTCTimestamp { - // header is the last CZ header in the finalized Babylon epoch - babylon.zoneconcierge.v1.IndexedHeader header = 1; - - /* - Data for BTC light client - */ - // btc_headers is BTC headers between - // - the block AFTER the common ancestor of BTC tip at epoch `lastFinalizedEpoch-1` and BTC tip at epoch `lastFinalizedEpoch` - // - BTC tip at epoch `lastFinalizedEpoch` - // where `lastFinalizedEpoch` is the last finalised epoch in Babylon - repeated babylon.btclightclient.v1.BTCHeaderInfo btc_headers = 2; - - /* - Data for Babylon epoch chain - */ - // epoch_info is the metadata of the sealed epoch - babylon.epoching.v1.Epoch epoch_info = 3; - // raw_checkpoint is the raw checkpoint that seals this epoch - babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 4; - // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch - babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 5; - - /* - Proofs that the header is finalized - */ - babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 6; -} \ No newline at end of file diff --git a/proto/babylon/zoneconcierge/v1/params.proto b/proto/babylon/zoneconcierge/v1/params.proto deleted file mode 100644 index 48fa6c9e..00000000 --- a/proto/babylon/zoneconcierge/v1/params.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// Params defines the parameters for the module. 
-message Params { - option (gogoproto.equal) = true; - - // ibc_packet_timeout_seconds is the time period after which an unrelayed - // IBC packet becomes timeout, measured in seconds - uint32 ibc_packet_timeout_seconds = 1 - [ (gogoproto.moretags) = "yaml:\"ibc_packet_timeout_seconds\"" ]; -} diff --git a/proto/babylon/zoneconcierge/v1/query.proto b/proto/babylon/zoneconcierge/v1/query.proto deleted file mode 100644 index 67f3b450..00000000 --- a/proto/babylon/zoneconcierge/v1/query.proto +++ /dev/null @@ -1,202 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "babylon/btccheckpoint/v1/btccheckpoint.proto"; -import "babylon/checkpointing/v1/checkpoint.proto"; -import "babylon/epoching/v1/epoching.proto"; -import "babylon/zoneconcierge/v1/zoneconcierge.proto"; -import "babylon/zoneconcierge/v1/params.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// Query defines the gRPC querier service. -service Query { - // Params queries the parameters of the module. - rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { - option (google.api.http).get = "/babylon/zoneconcierge/v1/params"; - } - // Header queries the CZ header and fork headers at a given height. - rpc Header(QueryHeaderRequest) returns (QueryHeaderResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/chain_info/{consumer_id}/header/{height}"; - } - // ChainList queries the list of chains that checkpoint to Babylon - rpc ChainList(QueryChainListRequest) returns (QueryChainListResponse) { - option (google.api.http).get = "/babylon/zoneconcierge/v1/chains"; - } - // ChainsInfo queries the latest info for a given list of chains in Babylon's view - rpc ChainsInfo(QueryChainsInfoRequest) returns (QueryChainsInfoResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/chains_info"; - } - // EpochChainsInfo queries the latest info for a list of chains - // in a given epoch in Babylon's view - rpc EpochChainsInfo(QueryEpochChainsInfoRequest) - returns (QueryEpochChainsInfoResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/epoch_chains_info"; - } - // ListHeaders queries the headers of a chain in Babylon's view, with - // pagination support - rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/headers/{consumer_id}"; - } - // ListEpochHeaders queries the headers of a chain timestamped in a given - // epoch of Babylon, with pagination support - rpc ListEpochHeaders(QueryListEpochHeadersRequest) - returns (QueryListEpochHeadersResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/headers/{consumer_id}/epochs/{epoch_num}"; - } - // FinalizedChainsInfo queries the BTC-finalised info of chains with given IDs, with proofs - rpc FinalizedChainsInfo(QueryFinalizedChainsInfoRequest) - returns (QueryFinalizedChainsInfoResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/finalized_chains_info"; - } - // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than - // the provided CZ height, with proofs - rpc FinalizedChainInfoUntilHeight(QueryFinalizedChainInfoUntilHeightRequest) - returns (QueryFinalizedChainInfoUntilHeightResponse) { - option (google.api.http).get = - "/babylon/zoneconcierge/v1/finalized_chain_info/{consumer_id}/height/" - "{height}"; - 
} -} - -// QueryParamsRequest is the request type for the Query/Params RPC method. -message QueryParamsRequest {} - -// QueryParamsResponse is the response type for the Query/Params RPC method. -message QueryParamsResponse { - // params holds all the parameters of this module. - babylon.zoneconcierge.v1.Params params = 1 [ (gogoproto.nullable) = false ]; -} - -// QueryHeaderRequest is request type for the Query/Header RPC method. -message QueryHeaderRequest { - string consumer_id = 1; - uint64 height = 2; -} - -// QueryHeaderResponse is response type for the Query/Header RPC method. -message QueryHeaderResponse { - babylon.zoneconcierge.v1.IndexedHeader header = 1; - babylon.zoneconcierge.v1.Forks fork_headers = 2; -} - -// QueryChainListRequest is request type for the Query/ChainList RPC method -message QueryChainListRequest { - // pagination defines whether to have the pagination in the request - cosmos.base.query.v1beta1.PageRequest pagination = 1; -} - -// QueryChainListResponse is response type for the Query/ChainList RPC method -message QueryChainListResponse { - // consumer_ids are IDs of the chains in ascending alphabetical order - repeated string consumer_ids = 1; - // pagination defines the pagination in the response - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryChainsInfoRequest is request type for the Query/ChainsInfo RPC method. -message QueryChainsInfoRequest { repeated string consumer_ids = 1; } - -// QueryChainsInfoResponse is response type for the Query/ChainsInfo RPC method. -message QueryChainsInfoResponse { - repeated babylon.zoneconcierge.v1.ChainInfo chains_info = 1; -} - -// QueryEpochChainsInfoRequest is request type for the Query/EpochChainsInfo RPC -// method. -message QueryEpochChainsInfoRequest { - uint64 epoch_num = 1; - repeated string consumer_ids = 2; -} - -// QueryEpochChainsInfoResponse is response type for the Query/EpochChainsInfo RPC -// method. -message QueryEpochChainsInfoResponse { - // chain_info is the info of the CZ - repeated babylon.zoneconcierge.v1.ChainInfo chains_info = 1; -} - -// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. -message QueryListHeadersRequest { - string consumer_id = 1; - // pagination defines whether to have the pagination in the request - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryListHeadersResponse is response type for the Query/ListHeaders RPC -// method. -message QueryListHeadersResponse { - // headers is the list of headers - repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; - // pagination defines the pagination in the response - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders -// RPC method. -message QueryListEpochHeadersRequest { - uint64 epoch_num = 1; - string consumer_id = 2; -} - -// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders -// RPC method. -message QueryListEpochHeadersResponse { - // headers is the list of headers - repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; -} - -// QueryFinalizedChainsInfoRequest is request type for the -// Query/FinalizedChainsInfo RPC method. 
-message QueryFinalizedChainsInfoRequest { - // consumer_ids is the list of ids of CZs - repeated string consumer_ids = 1; - // prove indicates whether the querier wants to get proofs of this timestamp - bool prove = 2; -} - -// QueryFinalizedChainsInfoResponse is response type for the -// Query/FinalizedChainsInfo RPC method. -message QueryFinalizedChainsInfoResponse { - repeated babylon.zoneconcierge.v1.FinalizedChainInfo finalized_chains_info = 1; -} - -// QueryFinalizedChainInfoUntilHeightRequest is request type for the -// Query/FinalizedChainInfoUntilHeight RPC method. -message QueryFinalizedChainInfoUntilHeightRequest { - // consumer_id is the ID of the CZ - string consumer_id = 1; - // height is the height of the CZ chain - // such that the returned finalised chain info will be no later than this - // height - uint64 height = 2; - // prove indicates whether the querier wants to get proofs of this timestamp - bool prove = 3; -} - -// QueryFinalizedChainInfoUntilHeightResponse is response type for the -// Query/FinalizedChainInfoUntilHeight RPC method. -message QueryFinalizedChainInfoUntilHeightResponse { - // finalized_chain_info is the info of the CZ - babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; - - // epoch_info is the metadata of the last BTC-finalised epoch - babylon.epoching.v1.Epoch epoch_info = 2; - // raw_checkpoint is the raw checkpoint of this epoch - babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 3; - // btc_submission_key is position of two BTC txs that include the raw - // checkpoint of this epoch - babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; - - // proof is the proof that the chain info is finalized - babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 5; -} diff --git a/proto/babylon/zoneconcierge/v1/tx.proto b/proto/babylon/zoneconcierge/v1/tx.proto deleted file mode 100644 index 5e94ee9f..00000000 --- a/proto/babylon/zoneconcierge/v1/tx.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - - -import "gogoproto/gogo.proto"; -import "cosmos_proto/cosmos.proto"; -import "cosmos/msg/v1/msg.proto"; -import "babylon/zoneconcierge/v1/params.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// Msg defines the Msg service. -service Msg { - option (cosmos.msg.v1.service) = true; - - // UpdateParams updates the zoneconcierge module parameters. - rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); -} - -// MsgUpdateParams defines a message for updating zoneconcierge module parameters. -message MsgUpdateParams { - option (cosmos.msg.v1.signer) = "authority"; - - // authority is the address of the governance account. - // just FYI: cosmos.AddressString marks that this field should use type alias - // for AddressString instead of string, but the functionality is not yet implemented - // in cosmos-proto - string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - - // params defines the zoneconcierge parameters to update. - // - // NOTE: All parameters must be supplied. - Params params = 2 [(gogoproto.nullable) = false]; - } - - // MsgUpdateParamsResponse is the response to the MsgUpdateParams message. 
- message MsgUpdateParamsResponse {} diff --git a/proto/babylon/zoneconcierge/v1/zoneconcierge.proto b/proto/babylon/zoneconcierge/v1/zoneconcierge.proto deleted file mode 100644 index 44eeefdb..00000000 --- a/proto/babylon/zoneconcierge/v1/zoneconcierge.proto +++ /dev/null @@ -1,148 +0,0 @@ -syntax = "proto3"; -package babylon.zoneconcierge.v1; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/crypto/proof.proto"; -import "babylon/btccheckpoint/v1/btccheckpoint.proto"; -import "babylon/checkpointing/v1/bls_key.proto"; -import "babylon/checkpointing/v1/checkpoint.proto"; -import "babylon/epoching/v1/epoching.proto"; -import "babylon/btclightclient/v1/btclightclient.proto"; - -option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"; - -// IndexedHeader is the metadata of a CZ header -message IndexedHeader { - // consumer_id is the unique ID of the consumer - string consumer_id = 1; - // hash is the hash of this header - bytes hash = 2; - // height is the height of this header on CZ ledger - // (hash, height) jointly provides the position of the header on CZ ledger - uint64 height = 3; - // time is the timestamp of this header on CZ ledger - // it is needed for CZ to unbond all mature validators/delegations - // before this timestamp when this header is BTC-finalised - google.protobuf.Timestamp time = 4 [ (gogoproto.stdtime) = true ]; - // babylon_header_hash is the hash of the babylon block that includes this CZ - // header - bytes babylon_header_hash = 5; - // babylon_header_height is the height of the babylon block that includes this CZ - // header - uint64 babylon_header_height = 6; - // epoch is the epoch number of this header on Babylon ledger - uint64 babylon_epoch = 7; - // babylon_tx_hash is the hash of the tx that includes this header - // (babylon_block_height, babylon_tx_hash) jointly provides the position of - // the header on Babylon ledger - bytes babylon_tx_hash = 8; -} - -// Forks is a list of non-canonical `IndexedHeader`s at the same height. -// For example, assuming the following blockchain -// ``` -// A <- B <- C <- D <- E -// \ -- D1 -// \ -- D2 -// ``` -// Then the fork will be {[D1, D2]} where each item is in struct `IndexedBlock`. -// -// Note that each `IndexedHeader` in the fork should have a valid quorum -// certificate. Such forks exist since Babylon considers CZs might have -// dishonest majority. Also note that the IBC-Go implementation will only -// consider the first header in a fork valid, since the subsequent headers -// cannot be verified without knowing the validator set in the previous header. 
-message Forks { - // blocks is the list of non-canonical indexed headers at the same height - repeated IndexedHeader headers = 3; -} - -// ChainInfo is the information of a CZ -message ChainInfo { - // consumer_id is the ID of the consumer - string consumer_id = 1; - // latest_header is the latest header in CZ's canonical chain - IndexedHeader latest_header = 2; - // latest_forks is the latest forks, formed as a series of IndexedHeader (from - // low to high) - Forks latest_forks = 3; - // timestamped_headers_count is the number of timestamped headers in CZ's - // canonical chain - uint64 timestamped_headers_count = 4; -} - -// ChainInfoWithProof is the chain info with a proof that the latest header in -// the chain info is included in the epoch -message ChainInfoWithProof { - ChainInfo chain_info = 1; - // proof_header_in_epoch is an inclusion proof that the latest_header in chain_info - // is committed to `app_hash` of the sealer header of latest_header.babylon_epoch - // this field is optional - tendermint.crypto.ProofOps proof_header_in_epoch = 2; -} - -// FinalizedChainInfo is the information of a CZ that is BTC-finalised -message FinalizedChainInfo { - // consumer_id is the ID of the consumer - string consumer_id = 1; - // finalized_chain_info is the info of the CZ - babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 2; - - // epoch_info is the metadata of the last BTC-finalised epoch - babylon.epoching.v1.Epoch epoch_info = 3; - // raw_checkpoint is the raw checkpoint of this epoch - babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 4; - // btc_submission_key is position of two BTC txs that include the raw - // checkpoint of this epoch - babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 5; - - // proof is the proof that the chain info is finalized - babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 6; -} - -// ProofEpochSealed is the proof that an epoch is sealed by the sealer header, -// i.e., the 2nd header of the next epoch With the access of metadata -// - Metadata of this epoch, which includes the sealer header -// - Raw checkpoint of this epoch -// The verifier can perform the following verification rules: -// - The raw checkpoint's `app_hash` is same as in the sealer header -// - More than 2/3 (in voting power) validators in the validator set of this -// epoch have signed `app_hash` of the sealer header -// - The epoch metadata is committed to the `app_hash` of the sealer header -// - The validator set is committed to the `app_hash` of the sealer header -message ProofEpochSealed { - // validator_set is the validator set of the sealed epoch - // This validator set has generated a BLS multisig on `app_hash` of - // the sealer header - repeated babylon.checkpointing.v1.ValidatorWithBlsKey validator_set = 1; - // proof_epoch_info is the Merkle proof that the epoch's metadata is committed - // to `app_hash` of the sealer header - tendermint.crypto.ProofOps proof_epoch_info = 2; - // proof_epoch_info is the Merkle proof that the epoch's validator set is - // committed to `app_hash` of the sealer header - tendermint.crypto.ProofOps proof_epoch_val_set = 3; -} - -// ProofFinalizedChainInfo is a set of proofs that attest a chain info is -// BTC-finalised -message ProofFinalizedChainInfo { - /* - The following fields include proofs that attest the chain info is - BTC-finalised - */ - // proof_cz_header_in_epoch is the proof that the CZ header is timestamped - // within a certain epoch - tendermint.crypto.ProofOps proof_cz_header_in_epoch = 1; - // 
proof_epoch_sealed is the proof that the epoch is sealed - babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 2; - // proof_epoch_submitted is the proof that the epoch's checkpoint is included - // in BTC ledger It is the two TransactionInfo in the best (i.e., earliest) - // checkpoint submission - repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 3; -} - -// Btc light client chain segment grown during last finalized epoch -message BTCChainSegment { - repeated babylon.btclightclient.v1.BTCHeaderInfo btc_headers = 1; -} diff --git a/test/e2e/btc_staking_e2e_test.go b/test/e2e/btc_staking_e2e_test.go index 28315bc4..36593f3a 100644 --- a/test/e2e/btc_staking_e2e_test.go +++ b/test/e2e/btc_staking_e2e_test.go @@ -15,6 +15,7 @@ import ( sdkmath "cosmossdk.io/math" feegrantcli "cosmossdk.io/x/feegrant/client/cli" + appparams "github.com/babylonlabs-io/babylon/app/params" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/babylonlabs-io/babylon/app/params" @@ -554,7 +555,6 @@ func (s *BTCStakingTestSuite) Test7BTCDelegationFeeGrant() { granteeStakerAddr := sdk.MustAccAddressFromBech32(nonValidatorNode.KeysAdd(wGratee)) feePayerBalanceBeforeBTCDel := sdk.NewCoin(params.DefaultBondDenom, sdkmath.NewInt(100000)) - fees := sdk.NewCoin(params.DefaultBondDenom, sdkmath.NewInt(50000)) // fund the granter nonValidatorNode.BankSendFromNode(feePayerAddr.String(), feePayerBalanceBeforeBTCDel.String()) @@ -606,7 +606,6 @@ func (s *BTCStakingTestSuite) Test7BTCDelegationFeeGrant() { wGratee, false, fmt.Sprintf("--fee-granter=%s", feePayerAddr.String()), - fmt.Sprintf("--fees=%s", fees.String()), ) // wait for a block so that above txs take effect @@ -623,10 +622,10 @@ func (s *BTCStakingTestSuite) Test7BTCDelegationFeeGrant() { s.NoError(err) s.True(stakerBalances.IsZero()) - // the fee payer should have the (feePayerBalanceBeforeBTCDel - fee) == currentBalance + // the fee payer should have the feePayerBalanceBeforeBTCDel > currentBalance feePayerBalances, err := nonValidatorNode.QueryBalances(feePayerAddr.String()) s.NoError(err) - s.Equal(feePayerBalanceBeforeBTCDel.Sub(fees).String(), feePayerBalances.String()) + s.True(feePayerBalanceBeforeBTCDel.Amount.GT(feePayerBalances.AmountOf(appparams.BaseCoinUnit))) } // Test8BTCDelegationFeeGrantTyped is an end-to-end test to create a BTC delegation @@ -741,7 +740,6 @@ func (s *BTCStakingTestSuite) Test8BTCDelegationFeeGrantTyped() { wGratee, false, fmt.Sprintf("--fee-granter=%s", feePayerAddr.String()), - fmt.Sprintf("--fees=%s", fees.String()), ) // wait for a block so that above txs take effect @@ -758,10 +756,10 @@ func (s *BTCStakingTestSuite) Test8BTCDelegationFeeGrantTyped() { s.NoError(err) s.Equal(stakerBalance.String(), stakerBalances.String()) - // the fee payer should have the (feePayerBalanceBeforeBTCDel - fee) == currentBalance + // the fee payer should have the feePayerBalanceBeforeBTCDel > currentBalance feePayerBalances, err := node.QueryBalances(feePayerAddr.String()) s.NoError(err) - s.Equal(feePayerBalanceBeforeBTCDel.Sub(fees).String(), feePayerBalances.String()) + s.True(feePayerBalanceBeforeBTCDel.Amount.GT(feePayerBalances.AmountOf(appparams.BaseCoinUnit))) } // ParseRespsBTCDelToBTCDel parses an BTC delegation response to BTC Delegation @@ -870,6 +868,9 @@ func (s *BTCStakingTestSuite) CreateNodeFP(node *chain.NodeConfig) (newFP *bstyp newFP, err = datagen.GenRandomFinalityProviderWithBTCBabylonSKs(r, fpBTCSK, nodeAddr) s.NoError(err) + // use a higher commission to ensure the reward is more than tx 
fee of a finality sig + commission := sdkmath.LegacyNewDecWithPrec(20, 2) + newFP.Commission = &commission node.CreateFinalityProvider(newFP.Addr, newFP.BtcPk, newFP.Pop, newFP.Description.Moniker, newFP.Description.Identity, newFP.Description.Website, newFP.Description.SecurityContact, newFP.Description.Details, newFP.Commission) // wait for a block so that above txs take effect diff --git a/test/e2e/btc_timestamping_e2e_test.go b/test/e2e/btc_timestamping_e2e_test.go index c344e6e1..e55b43c6 100644 --- a/test/e2e/btc_timestamping_e2e_test.go +++ b/test/e2e/btc_timestamping_e2e_test.go @@ -105,28 +105,15 @@ func (s *BTCTimestampingTestSuite) Test3SendTx() { s.Equal(tip2Depth, uint64(0)) } -func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() { +func (s *BTCTimestampingTestSuite) Test4GenerateAndWithdrawReward() { chainA := s.configurer.GetChainConfig(0) + chainA.WaitUntilHeight(35) nonValidatorNode, err := chainA.GetNodeAtIndex(2) s.NoError(err) - // Query open IBC channels and assert there is only one - channels, err := nonValidatorNode.QueryIBCChannels() - s.NoError(err) - s.Equal(1, len(channels.Channels), "Expected only one open IBC channel") - // Get the client ID under this IBC channel - channelClientState, err := nonValidatorNode.QueryChannelClientState(channels.Channels[0].ChannelId, channels.Channels[0].PortId) - s.NoError(err) - clientID := channelClientState.IdentifiedClientState.ClientId - - // Query checkpoint chain info for opposing chain - chainsInfo, err := nonValidatorNode.QueryChainsInfo([]string{clientID}) - s.NoError(err) - s.Equal(chainsInfo[0].ConsumerId, clientID) - - // Finalize epoch 1, 2, 3, as first headers of opposing chain are in epoch 3 + // Finalize epoch 1, 2, 3 var ( startEpochNum uint64 = 1 endEpochNum uint64 = 3 @@ -148,31 +135,6 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() { // Wait for next block nonValidatorNode.WaitForNextBlock() - // Check we have epoch info for opposing chain and some basic assertions - epochChainsInfo, err := nonValidatorNode.QueryEpochChainsInfo(endEpochNum, []string{clientID}) - s.NoError(err) - s.Equal(epochChainsInfo[0].ConsumerId, clientID) - s.Equal(epochChainsInfo[0].LatestHeader.BabylonEpoch, endEpochNum) - - // Check we have finalized epoch info for opposing chain and some basic assertions - finalizedChainsInfo, err := nonValidatorNode.QueryFinalizedChainsInfo([]string{clientID}) - s.NoError(err) - - // TODO Add more assertion here. Maybe check proofs ? - s.Equal(finalizedChainsInfo[0].FinalizedChainInfo.ConsumerId, clientID) - s.Equal(finalizedChainsInfo[0].EpochInfo.EpochNumber, endEpochNum) - - currEpoch, err := nonValidatorNode.QueryCurrentEpoch() - s.NoError(err) - - heightAtEndedEpoch, err := nonValidatorNode.QueryLightClientHeightEpochEnd(currEpoch - 1) - s.NoError(err) - - if heightAtEndedEpoch == 0 { - // we can only assert, that btc lc height is larger than 0. 
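// Note on the commission bump in CreateNodeFP above (illustrative sketch, not part of this patch):
// sdkmath.LegacyNewDecWithPrec(v, p) builds v * 10^-p, so LegacyNewDecWithPrec(20, 2) is 0.20,
// i.e. a 20% commission, chosen so that a finality provider's reward stays above the tx fee of a
// finality signature now that a non-zero minimum gas price is enforced in the e2e setup.
exampleCommission := sdkmath.LegacyNewDecWithPrec(20, 2)
_ = exampleCommission.Equal(sdkmath.LegacyMustNewDecFromStr("0.20")) // true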
- s.FailNow(fmt.Sprintf("Light client height should be > 0 on epoch %d", currEpoch-1)) - } - // ensure balance has increased after finalising some epochs rewardGauges, err := nonValidatorNode.QueryRewardGauge(submitterReporterAddr) s.NoError(err) @@ -183,20 +145,6 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() { s.True(ok) s.True(reporterRewardGauge.Coins.IsAllPositive()) - chainB := s.configurer.GetChainConfig(1) - _, err = chainB.GetDefaultNode() - s.NoError(err) -} - -func (s *BTCTimestampingTestSuite) Test5WithdrawReward() { - chainA := s.configurer.GetChainConfig(0) - nonValidatorNode, err := chainA.GetNodeAtIndex(2) - s.NoError(err) - - // NOTE: nonValidatorNode.PublicAddress is the address associated with key name `val` - // and is both the submitter and reporter - submitterReporterAddr := sdk.MustAccAddressFromBech32(nonValidatorNode.PublicAddress) - // balance before withdraw balance, err := nonValidatorNode.QueryBalances(submitterReporterAddr.String()) s.NoError(err) @@ -243,7 +191,7 @@ func (s *BTCTimestampingTestSuite) Test5WithdrawReward() { s.True(rgs3[itypes.ReporterType.String()].IsFullyWithdrawn()) } -func (s *BTCTimestampingTestSuite) Test6Wasm() { +func (s *BTCTimestampingTestSuite) Test5Wasm() { contractPath := "/bytecode/storage_contract.wasm" chainA := s.configurer.GetChainConfig(0) nonValidatorNode, err := chainA.GetNodeAtIndex(2) @@ -301,7 +249,7 @@ func (s *BTCTimestampingTestSuite) Test6Wasm() { s.Greater(saveEpoch, latestFinalizedEpoch) } -func (s *BTCTimestampingTestSuite) Test7InterceptFeeCollector() { +func (s *BTCTimestampingTestSuite) Test6InterceptFeeCollector() { chainA := s.configurer.GetChainConfig(0) nonValidatorNode, err := chainA.GetNodeAtIndex(2) s.NoError(err) diff --git a/test/e2e/btc_timestamping_phase2_hermes_test.go b/test/e2e/btc_timestamping_phase2_hermes_test.go deleted file mode 100644 index 4f656e06..00000000 --- a/test/e2e/btc_timestamping_phase2_hermes_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package e2e - -import ( - "time" - - "github.com/babylonlabs-io/babylon/test/e2e/configurer" - ct "github.com/babylonlabs-io/babylon/x/checkpointing/types" - "github.com/cosmos/cosmos-sdk/types/query" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - "github.com/stretchr/testify/suite" -) - -type BTCTimestampingPhase2HermesTestSuite struct { - suite.Suite - - configurer configurer.Configurer -} - -func (s *BTCTimestampingPhase2HermesTestSuite) SetupSuite() { - s.T().Log("setting up phase 2 integration test suite...") - var ( - err error - ) - - // The e2e test flow is as follows: - // - // 1. Configure two chains - chain A and chain B. - // * For each chain, set up several validator nodes - // * Initialize configs and genesis for all them. - // 2. Start both networks. - // 3. Store and instantiate babylon contract on chain B. - // 3. 
Execute various e2e tests, excluding IBC - s.configurer, err = configurer.NewBTCTimestampingPhase2Configurer(s.T(), true) - - s.Require().NoError(err) - - err = s.configurer.ConfigureChains() - s.Require().NoError(err) - - err = s.configurer.RunSetup() - s.Require().NoError(err) -} - -func (s *BTCTimestampingPhase2HermesTestSuite) TearDownSuite() { - err := s.configurer.ClearResources() - if err != nil { - s.T().Logf("error to clear resources %s", err.Error()) - } -} - -func (s *BTCTimestampingPhase2HermesTestSuite) Test1IbcCheckpointingPhase2Hermes() { - chainA := s.configurer.GetChainConfig(0) - nonValidatorNode, err := chainA.GetNodeAtIndex(2) - s.NoError(err) - - babylonNode, err := chainA.GetNodeAtIndex(2) - s.NoError(err) - czNode, err := s.configurer.GetChainConfig(1).GetNodeAtIndex(2) - s.NoError(err) - - // Validate channel state and kind (Babylon side) - // Wait until the channel (Babylon side) is open - var babylonChannel *channeltypes.IdentifiedChannel - s.Eventually(func() bool { - babylonChannelsResp, err := babylonNode.QueryIBCChannels() - if err != nil { - return false - } - if len(babylonChannelsResp.Channels) != 1 { - return false - } - // channel has to be open and ordered - babylonChannel = babylonChannelsResp.Channels[0] - if babylonChannel.State != channeltypes.OPEN { - return false - } - s.Equal(channeltypes.ORDERED, babylonChannel.Ordering) - // the counterparty has to be the Babylon smart contract - s.Contains(babylonChannel.Counterparty.PortId, "wasm.") - return true - }, time.Minute, time.Second*2) - - // Wait until the channel (CZ side) is open - var czChannel *channeltypes.IdentifiedChannel - s.Eventually(func() bool { - czChannelsResp, err := czNode.QueryIBCChannels() - if err != nil { - return false - } - if len(czChannelsResp.Channels) != 1 { - return false - } - czChannel = czChannelsResp.Channels[0] - if czChannel.State != channeltypes.OPEN { - return false - } - s.Equal(channeltypes.ORDERED, czChannel.Ordering) - s.Equal(babylonChannel.PortId, czChannel.Counterparty.PortId) - return true - }, time.Minute, time.Second*2) - - // Get the client ID under this IBC channel - channelClientState, err := nonValidatorNode.QueryChannelClientState(babylonChannel.ChannelId, babylonChannel.PortId) - s.NoError(err) - clientID := channelClientState.IdentifiedClientState.ClientId - - // Query checkpoint chain info for the consumer chain - listHeaderResp, err := babylonNode.QueryListHeaders(clientID, &query.PageRequest{Limit: 1}) - s.NoError(err) - s.GreaterOrEqual(len(listHeaderResp.Headers), 1) - startEpochNum := listHeaderResp.Headers[0].BabylonEpoch - endEpochNum := startEpochNum + 2 - - // wait until epoch endEpochNum - // so that there will be endEpochNum - startEpochNum + 1 = 3 - // BTC timestamps in Babylon contract - chainA.WaitUntilHeight(int64(endEpochNum*10 + 5)) - babylonNode.FinalizeSealedEpochs(1, endEpochNum) - - // ensure endEpochNum has been finalised - endEpoch, err := babylonNode.QueryRawCheckpoint(endEpochNum) - s.NoError(err) - s.Equal(endEpoch.Status, ct.Finalized) - - // there should be 3 IBC packets sent (with sequence number 1, 2, 3). 
- // Thus, the next sequence number will eventually be 4 - s.Eventually(func() bool { - nextSequenceSendResp, err := babylonNode.QueryNextSequenceSend(babylonChannel.ChannelId, babylonChannel.PortId) - if err != nil { - return false - } - babylonNode.LogActionF("next sequence send at ZoneConcierge is %d", nextSequenceSendResp.NextSequenceSend) - return nextSequenceSendResp.NextSequenceSend >= endEpochNum-startEpochNum+1+1 - }, time.Minute, time.Second*2) - - // ensure the next receive sequence number of Babylon contract is also 3 - var nextSequenceRecv *channeltypes.QueryNextSequenceReceiveResponse - s.Eventually(func() bool { - nextSequenceRecv, err = czNode.QueryNextSequenceReceive(babylonChannel.Counterparty.ChannelId, babylonChannel.Counterparty.PortId) - if err != nil { - return false - } - czNode.LogActionF("next sequence receive at Babylon contract is %d", nextSequenceRecv.NextSequenceReceive) - return nextSequenceRecv.NextSequenceReceive >= endEpochNum-startEpochNum+1+1 - }, time.Minute, time.Second*2) - - // Ensure the IBC packet acknowledgements (on chain B) are there and do not contain error - nextSequence := nextSequenceRecv.NextSequenceReceive - for seq := uint64(1); seq < nextSequence; seq++ { - var seqResp *channeltypes.QueryPacketAcknowledgementResponse - s.Eventually(func() bool { - seqResp, err = czNode.QueryPacketAcknowledgement(czChannel.ChannelId, czChannel.PortId, seq) - czNode.LogActionF("acknowledgement resp of IBC packet #%d: %v, err: %v", seq, seqResp, err) - return err == nil - }, time.Minute, time.Second*2) - } -} diff --git a/test/e2e/btc_timestamping_phase2_rly_test.go b/test/e2e/btc_timestamping_phase2_rly_test.go deleted file mode 100644 index 744fc7d0..00000000 --- a/test/e2e/btc_timestamping_phase2_rly_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package e2e - -import ( - "time" - - "github.com/babylonlabs-io/babylon/test/e2e/configurer" - ct "github.com/babylonlabs-io/babylon/x/checkpointing/types" - "github.com/cosmos/cosmos-sdk/types/query" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - "github.com/stretchr/testify/suite" -) - -type BTCTimestampingPhase2RlyTestSuite struct { - suite.Suite - - configurer configurer.Configurer -} - -func (s *BTCTimestampingPhase2RlyTestSuite) SetupSuite() { - s.T().Log("setting up phase 2 go relayer integration test suite...") - var ( - err error - ) - - // The e2e test flow is as follows: - // - // 1. Configure two chains - chain A and chain B. - // * For each chain, set up several validator nodes - // * Initialize configs and genesis for all them. - // 2. Start both networks. - // 3. Store and instantiate babylon contract on chain B. - // 3. 
Execute various e2e tests, excluding IBC - s.configurer, err = configurer.NewBTCTimestampingPhase2RlyConfigurer(s.T(), true) - - s.Require().NoError(err) - - err = s.configurer.ConfigureChains() - s.Require().NoError(err) - - err = s.configurer.RunSetup() - s.Require().NoError(err) -} - -func (s *BTCTimestampingPhase2RlyTestSuite) TearDownSuite() { - err := s.configurer.ClearResources() - if err != nil { - s.T().Logf("error to clear resources %s", err.Error()) - } -} - -func (s *BTCTimestampingPhase2RlyTestSuite) Test1IbcCheckpointingPhase2Rly() { - chainA := s.configurer.GetChainConfig(0) - nonValidatorNode, err := chainA.GetNodeAtIndex(2) - s.NoError(err) - - babylonNode, err := chainA.GetNodeAtIndex(2) - s.NoError(err) - czNode, err := s.configurer.GetChainConfig(1).GetNodeAtIndex(2) - s.NoError(err) - - // Validate channel state and kind (Babylon side) - // Wait until the channel (Babylon side) is open - var babylonChannel *channeltypes.IdentifiedChannel - s.Eventually(func() bool { - babylonChannelsResp, err := babylonNode.QueryIBCChannels() - if err != nil { - return false - } - if len(babylonChannelsResp.Channels) != 1 { - return false - } - // channel has to be open and ordered - babylonChannel = babylonChannelsResp.Channels[0] - if babylonChannel.State != channeltypes.OPEN { - return false - } - s.Equal(channeltypes.ORDERED, babylonChannel.Ordering) - // the counterparty has to be the Babylon smart contract - s.Contains(babylonChannel.Counterparty.PortId, "wasm.") - return true - }, time.Minute, time.Second*2) - - // Wait until the channel (CZ side) is open - var czChannel *channeltypes.IdentifiedChannel - s.Eventually(func() bool { - czChannelsResp, err := czNode.QueryIBCChannels() - if err != nil { - return false - } - if len(czChannelsResp.Channels) != 1 { - return false - } - czChannel = czChannelsResp.Channels[0] - if czChannel.State != channeltypes.OPEN { - return false - } - s.Equal(channeltypes.ORDERED, czChannel.Ordering) - s.Equal(babylonChannel.PortId, czChannel.Counterparty.PortId) - return true - }, time.Minute, time.Second*2) - - // Get the client ID under this IBC channel - channelClientState, err := nonValidatorNode.QueryChannelClientState(babylonChannel.ChannelId, babylonChannel.PortId) - s.NoError(err) - clientID := channelClientState.IdentifiedClientState.ClientId - - // Query checkpoint chain info for the consumer chain - listHeaderResp, err := babylonNode.QueryListHeaders(clientID, &query.PageRequest{Limit: 1}) - s.NoError(err) - s.GreaterOrEqual(len(listHeaderResp.Headers), 1) - startEpochNum := listHeaderResp.Headers[0].BabylonEpoch - endEpochNum := startEpochNum + 2 - - // wait until epoch endEpochNum - // so that there will be endEpochNum - startEpochNum + 1 = 3 - // BTC timestamps in Babylon contract - chainA.WaitUntilHeight(int64(endEpochNum*10 + 5)) - babylonNode.FinalizeSealedEpochs(1, endEpochNum) - - // ensure endEpochNum has been finalised - endEpoch, err := babylonNode.QueryRawCheckpoint(endEpochNum) - s.NoError(err) - s.Equal(endEpoch.Status, ct.Finalized) - - // there should be 3 IBC packets sent (with sequence number 1, 2, 3). 
- // Thus, the next sequence number will eventually be 4 - s.Eventually(func() bool { - nextSequenceSendResp, err := babylonNode.QueryNextSequenceSend(babylonChannel.ChannelId, babylonChannel.PortId) - if err != nil { - return false - } - s.T().Logf("next sequence send at ZoneConcierge is %d", nextSequenceSendResp.NextSequenceSend) - return nextSequenceSendResp.NextSequenceSend >= endEpochNum-startEpochNum+1+1 - }, time.Minute, time.Second*2) - - // ensure the next receive sequence number of Babylon contract is also 3 - var nextSequenceRecv *channeltypes.QueryNextSequenceReceiveResponse - s.Eventually(func() bool { - nextSequenceRecv, err = czNode.QueryNextSequenceReceive(babylonChannel.Counterparty.ChannelId, babylonChannel.Counterparty.PortId) - if err != nil { - return false - } - s.T().Logf("next sequence receive at Babylon contract is %d", nextSequenceRecv.NextSequenceReceive) - return nextSequenceRecv.NextSequenceReceive >= endEpochNum-startEpochNum+1+1 - }, time.Minute, time.Second*2) - - // Ensure the IBC packet acknowledgements (on chain B) are there - nextSequence := nextSequenceRecv.NextSequenceReceive - for seq := uint64(1); seq < nextSequence; seq++ { - var seqResp *channeltypes.QueryPacketAcknowledgementResponse - s.Eventually(func() bool { - seqResp, err = czNode.QueryPacketAcknowledgement(czChannel.ChannelId, czChannel.PortId, seq) - s.T().Logf("acknowledgement resp of IBC packet #%d: %v, err: %v", seq, seqResp, err) - return err == nil - }, time.Minute, time.Second*2) - } -} diff --git a/test/e2e/configurer/base.go b/test/e2e/configurer/base.go index 38ae526d..65e1d048 100644 --- a/test/e2e/configurer/base.go +++ b/test/e2e/configurer/base.go @@ -127,9 +127,6 @@ func (bc *baseConfigurer) RunHermesRelayerIBC() error { if err := bc.runHermesIBCRelayer(bc.chainConfigs[i], bc.chainConfigs[j]); err != nil { return err } - if err := bc.createBabylonPhase2Channel(bc.chainConfigs[i], bc.chainConfigs[j]); err != nil { - return err - } } } return nil @@ -142,9 +139,6 @@ func (bc *baseConfigurer) RunCosmosRelayerIBC() error { if err := bc.runCosmosIBCRelayer(bc.chainConfigs[i], bc.chainConfigs[j]); err != nil { return err } - //if err := bc.createBabylonPhase2Channel(bc.chainConfigs[i], bc.chainConfigs[j]); err != nil { - // return err - //} } } // Launches a relayer between chain A (babylond) and chain B (wasmd) @@ -298,27 +292,6 @@ func (bc *baseConfigurer) runCosmosIBCRelayer(chainConfigA *chain.Config, chainC return nil } -func (bc *baseConfigurer) createBabylonPhase2Channel(chainA *chain.Config, chainB *chain.Config) error { - bc.t.Logf("connecting %s and %s chains via IBC", chainA.ChainMeta.Id, chainB.ChainMeta.Id) - require.Equal(bc.t, chainA.IBCConfig.Order, chainB.IBCConfig.Order) - require.Equal(bc.t, chainA.IBCConfig.Version, chainB.IBCConfig.Version) - cmd := []string{"hermes", "create", "channel", - "--a-chain", chainA.ChainMeta.Id, "--b-chain", chainB.ChainMeta.Id, // channel ID - "--a-port", chainA.IBCConfig.PortID, "--b-port", chainB.IBCConfig.PortID, // port - "--order", chainA.IBCConfig.Order.String(), - "--channel-version", chainA.IBCConfig.Version, - "--new-client-connection", "--yes", - } - _, _, err := bc.containerManager.ExecHermesCmd(bc.t, cmd, "SUCCESS") - if err != nil { - return err - } - bc.t.Logf("connected %s and %s chains via IBC", chainA.ChainMeta.Id, chainB.ChainMeta.Id) - bc.t.Logf("chainA's IBC config: %v", chainA.IBCConfig) - bc.t.Logf("chainB's IBC config: %v", chainB.IBCConfig) - return nil -} - func (bc *baseConfigurer) 
createIBCTransferChannel(chainA *chain.Config, chainB *chain.Config) error { bc.t.Logf("connecting %s and %s chains via IBC", chainA.ChainMeta.Id, chainB.ChainMeta.Id) cmd := []string{"hermes", "create", "channel", "--a-chain", chainA.ChainMeta.Id, "--b-chain", chainB.ChainMeta.Id, "--a-port", "transfer", "--b-port", "transfer", "--new-client-connection", "--yes"} diff --git a/test/e2e/configurer/chain/commands.go b/test/e2e/configurer/chain/commands.go index feca5964..73c6e188 100644 --- a/test/e2e/configurer/chain/commands.go +++ b/test/e2e/configurer/chain/commands.go @@ -258,7 +258,7 @@ func (n *NodeConfig) FinalizeSealedEpochs(startEpoch uint64, lastEpoch uint64) { func (n *NodeConfig) StoreWasmCode(wasmFile, from string) { n.LogActionF("storing wasm code from file %s", wasmFile) - cmd := []string{"babylond", "tx", "wasm", "store", wasmFile, fmt.Sprintf("--from=%s", from), "--gas=auto", "--gas-prices=1ubbn", "--gas-adjustment=1.3"} + cmd := []string{"babylond", "tx", "wasm", "store", wasmFile, fmt.Sprintf("--from=%s", from), "--gas=auto", "--gas-adjustment=1.3"} n.LogActionF(strings.Join(cmd, " ")) _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) require.NoError(n.t, err) @@ -267,7 +267,7 @@ func (n *NodeConfig) StoreWasmCode(wasmFile, from string) { func (n *NodeConfig) InstantiateWasmContract(codeId, initMsg, from string) { n.LogActionF("instantiating wasm contract %s with %s", codeId, initMsg) - cmd := []string{"babylond", "tx", "wasm", "instantiate", codeId, initMsg, fmt.Sprintf("--from=%s", from), "--no-admin", "--label=contract", "--gas=auto", "--gas-prices=1ubbn", "--gas-adjustment=1.3"} + cmd := []string{"babylond", "tx", "wasm", "instantiate", codeId, initMsg, fmt.Sprintf("--from=%s", from), "--no-admin", "--label=contract", "--gas=auto", "--gas-adjustment=1.3"} n.LogActionF(strings.Join(cmd, " ")) _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) require.NoError(n.t, err) diff --git a/test/e2e/configurer/chain/commands_btcstaking.go b/test/e2e/configurer/chain/commands_btcstaking.go index 1cb8c52d..8ba79eb9 100644 --- a/test/e2e/configurer/chain/commands_btcstaking.go +++ b/test/e2e/configurer/chain/commands_btcstaking.go @@ -101,9 +101,15 @@ func (n *NodeConfig) CreateBTCDelegation( n.FlagChainID(), "--log_format=json", } + // gas price + cmd = append(cmd, "--gas-prices=0.002ubbn") + if generateOnly { cmd = append(cmd, "--generate-only") } else { + // gas + cmd = append(cmd, "--gas=auto", "--gas-adjustment=1.3") + // broadcast stuff cmd = append(cmd, "-b=sync", "--yes") } @@ -140,7 +146,7 @@ func (n *NodeConfig) AddCovenantSigs(covPK *bbn.BIP340PubKey, stakingTxHash stri // used key cmd = append(cmd, "--from=val") // gas - cmd = append(cmd, "--gas=auto", "--gas-prices=1ubbn", "--gas-adjustment=1.3") + cmd = append(cmd, "--gas=auto", "--gas-adjustment=1.3") _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) require.NoError(n.t, err) diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go index 366bf335..95e8693e 100644 --- a/test/e2e/configurer/chain/queries.go +++ b/test/e2e/configurer/chain/queries.go @@ -28,7 +28,6 @@ import ( ct "github.com/babylonlabs-io/babylon/x/checkpointing/types" etypes "github.com/babylonlabs-io/babylon/x/epoching/types" mtypes "github.com/babylonlabs-io/babylon/x/monitor/types" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" ) func (n *NodeConfig) QueryGRPCGateway(path string, queryParams url.Values) ([]byte, error) { @@ -102,6 
+101,22 @@ func (n *NodeConfig) QueryBalances(address string) (sdk.Coins, error) { return balancesResp.GetBalances(), nil } +// QueryBalance returns balance of some address. +func (n *NodeConfig) QueryBalance(address, denom string) (*sdk.Coin, error) { + path := fmt.Sprintf("cosmos/bank/v1beta1/balances/%s/by_denom", address) + + params := url.Values{} + params.Set("denom", denom) + bz, err := n.QueryGRPCGateway(path, params) + require.NoError(n.t, err) + + var balancesResp banktypes.QueryBalanceResponse + if err := util.Cdc.UnmarshalJSON(bz, &balancesResp); err != nil { + return nil, err + } + return balancesResp.GetBalance(), nil +} + func (n *NodeConfig) QuerySupplyOf(denom string) (sdkmath.Int, error) { path := fmt.Sprintf("cosmos/bank/v1beta1/supply/%s", denom) bz, err := n.QueryGRPCGateway(path, url.Values{}) @@ -246,85 +261,6 @@ func (n *NodeConfig) QueryHeaderDepth(hash string) (uint64, error) { return blcResponse.Depth, nil } -func (n *NodeConfig) QueryListHeaders(consumerID string, pagination *query.PageRequest) (*zctypes.QueryListHeadersResponse, error) { - queryParams := url.Values{} - if pagination != nil { - queryParams.Set("pagination.key", base64.URLEncoding.EncodeToString(pagination.Key)) - queryParams.Set("pagination.limit", strconv.Itoa(int(pagination.Limit))) - } - - path := fmt.Sprintf("babylon/zoneconcierge/v1/headers/%s", consumerID) - bz, err := n.QueryGRPCGateway(path, queryParams) - require.NoError(n.t, err) - - var resp zctypes.QueryListHeadersResponse - if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -func (n *NodeConfig) QueryFinalizedChainsInfo(consumerIDs []string) ([]*zctypes.FinalizedChainInfo, error) { - queryParams := url.Values{} - for _, consumerID := range consumerIDs { - queryParams.Add("consumer_ids", consumerID) - } - - bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/finalized_chains_info", queryParams) - require.NoError(n.t, err) - - var resp zctypes.QueryFinalizedChainsInfoResponse - if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil { - return nil, err - } - - return resp.FinalizedChainsInfo, nil -} - -func (n *NodeConfig) QueryEpochChainsInfo(epochNum uint64, consumerIDs []string) ([]*zctypes.ChainInfo, error) { - queryParams := url.Values{} - for _, consumerID := range consumerIDs { - queryParams.Add("epoch_num", fmt.Sprintf("%d", epochNum)) - queryParams.Add("consumer_ids", consumerID) - } - - bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/epoch_chains_info", queryParams) - require.NoError(n.t, err) - - var resp zctypes.QueryEpochChainsInfoResponse - if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil { - return nil, err - } - - return resp.ChainsInfo, nil -} - -func (n *NodeConfig) QueryChains() (*[]string, error) { - bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/chains", url.Values{}) - require.NoError(n.t, err) - var chainsResponse zctypes.QueryChainListResponse - if err := util.Cdc.UnmarshalJSON(bz, &chainsResponse); err != nil { - return nil, err - } - return &chainsResponse.ConsumerIds, nil -} - -func (n *NodeConfig) QueryChainsInfo(consumerIDs []string) ([]*zctypes.ChainInfo, error) { - queryParams := url.Values{} - for _, consumerId := range consumerIDs { - queryParams.Add("consumer_ids", consumerId) - } - - bz, err := n.QueryGRPCGateway("/babylon/zoneconcierge/v1/chains_info", queryParams) - require.NoError(n.t, err) - var resp zctypes.QueryChainsInfoResponse - if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil { - return nil, err - } - 
return resp.ChainsInfo, nil -} - func (n *NodeConfig) QueryCurrentEpoch() (uint64, error) { bz, err := n.QueryGRPCGateway("/babylon/epoching/v1/current_epoch", url.Values{}) require.NoError(n.t, err) diff --git a/test/e2e/configurer/config/constants.go b/test/e2e/configurer/config/constants.go index 68586647..97547092 100644 --- a/test/e2e/configurer/config/constants.go +++ b/test/e2e/configurer/config/constants.go @@ -16,5 +16,5 @@ const ( // PropSubmitBlocks estimated number of blocks it takes to submit for a proposal PropSubmitBlocks float32 = 1 // Upgrade prop files json - UpgradeSignetLaunchFilePath = "/upgrades/signet-launch.json" + UpgradeSignetLaunchFilePath = "/upgrades/v1.json" ) diff --git a/test/e2e/configurer/factory.go b/test/e2e/configurer/factory.go index 36c39d7f..847c8fc3 100644 --- a/test/e2e/configurer/factory.go +++ b/test/e2e/configurer/factory.go @@ -11,8 +11,6 @@ import ( "github.com/babylonlabs-io/babylon/test/e2e/containers" "github.com/babylonlabs-io/babylon/test/e2e/initialization" btclighttypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - ibctesting "github.com/cosmos/ibc-go/v8/testing" ) type Configurer interface { @@ -103,16 +101,6 @@ var ( IsValidator: false, }, } - ibcConfigChainA = &ibctesting.ChannelConfig{ - PortID: zctypes.PortID, - Order: zctypes.Ordering, - Version: zctypes.Version, - } - ibcConfigChainB = &ibctesting.ChannelConfig{ - PortID: zctypes.PortID, // Will be replaced by the contract address in Phase 2 tests - Order: zctypes.Ordering, - Version: zctypes.Version, - } ) const MaxIndetifierSize = 10 @@ -129,8 +117,8 @@ func NewBTCTimestampingConfigurer(t *testing.T, isDebugLogEnabled bool) (Configu return NewCurrentBranchConfigurer(t, []*chain.Config{ - chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), ibcConfigChainA), - chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), ibcConfigChainB), + chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), nil), + chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), nil), }, withIBC(baseSetup), // base set up with IBC containerManager, @@ -146,8 +134,8 @@ func NewIBCTransferConfigurer(t *testing.T, isDebugLogEnabled bool) (Configurer, return NewCurrentBranchConfigurer(t, []*chain.Config{ - chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), ibcConfigChainA), - chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), ibcConfigChainB), + chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), nil), + chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), nil), }, withIBCTransferChannel(baseSetup), // base set up with IBC containerManager, @@ -164,8 +152,8 @@ func NewBTCTimestampingPhase2Configurer(t *testing.T, isDebugLogEnabled bool) (C return NewCurrentBranchConfigurer(t, []*chain.Config{ - chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), ibcConfigChainA), - 
chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), ibcConfigChainB), + chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), nil), + chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), nil), }, withPhase2IBC(baseSetup), // IBC setup (requires contract address) containerManager, @@ -182,8 +170,8 @@ func NewBTCTimestampingPhase2RlyConfigurer(t *testing.T, isDebugLogEnabled bool) return NewCurrentBranchConfigurer(t, []*chain.Config{ - chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), ibcConfigChainA), - chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), ibcConfigChainB), + chain.New(t, containerManager, initialization.ChainAID, updateNodeConfigNameWithIdentifier(validatorConfigsChainA, identifier), nil), + chain.New(t, containerManager, initialization.ChainBID, updateNodeConfigNameWithIdentifier(validatorConfigsChainB, identifier), nil), }, withPhase2RlyIBC(baseSetup), // IBC setup with wasmd and Go relayer containerManager, @@ -209,7 +197,7 @@ func NewBTCStakingConfigurer(t *testing.T, isDebugLogEnabled bool) (Configurer, } // NewSoftwareUpgradeConfigurer returns a new Configurer for Software Upgrade testing -func NewSoftwareUpgradeConfigurer(t *testing.T, isDebugLogEnabled bool, upgradePath string, btcHeaders []*btclighttypes.BTCHeaderInfo) (*UpgradeConfigurer, error) { +func NewSoftwareUpgradeConfigurer(t *testing.T, isDebugLogEnabled bool, upgradePath string, btcHeaders []*btclighttypes.BTCHeaderInfo, preUpgradeFunc PreUpgradeFunc) (*UpgradeConfigurer, error) { identifier := identifierName(t) containerManager, err := containers.NewManager(identifier, isDebugLogEnabled, false, true) if err != nil { @@ -230,6 +218,7 @@ func NewSoftwareUpgradeConfigurer(t *testing.T, isDebugLogEnabled bool, upgradeP containerManager, upgradePath, 0, + preUpgradeFunc, ), nil } diff --git a/test/e2e/configurer/upgrade.go b/test/e2e/configurer/upgrade.go index 65acb320..6524a0f7 100644 --- a/test/e2e/configurer/upgrade.go +++ b/test/e2e/configurer/upgrade.go @@ -29,10 +29,13 @@ type UpgradeSettings struct { ForkHeight int64 // non-zero height implies that this is a fork upgrade. } +type PreUpgradeFunc func([]*chain.Config) + type UpgradeConfigurer struct { baseConfigurer upgradeJsonFilePath string forkHeight int64 // forkHeight > 0 implies that this is a fork upgrade. Otherwise, proposal upgrade. + preUpgradeFunc PreUpgradeFunc } var _ Configurer = (*UpgradeConfigurer)(nil) @@ -40,7 +43,7 @@ var _ Configurer = (*UpgradeConfigurer)(nil) // NewUpgradeConfigurer returns a upgrade configurer, if forkHeight is bigger // than 0 it implies that it is a fork upgrade that does not pass by a gov prop // if it is set to zero it runs the upgrade by the gov prop. 
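// Illustrative sketch (not part of this patch): the new PreUpgradeFunc hook lets a test stage
// extra state right before the upgrade proposal is submitted; CreatePreUpgradeState invokes it
// with all chain configs. A hypothetical minimal hook is shown below; the signet-launch suite
// instead passes a closure that funds the token-distribution addresses and records their balances.
var examplePreUpgrade PreUpgradeFunc = func(chains []*chain.Config) {
	// inspect or fund nodes, e.g. via chains[0].NodeConfigs[1], before the upgrade proposal is sent
}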
-func NewUpgradeConfigurer(t *testing.T, chainConfigs []*chain.Config, setupTests setupFn, containerManager *containers.Manager, upgradePlanFilePath string, forkHeight int64) *UpgradeConfigurer { +func NewUpgradeConfigurer(t *testing.T, chainConfigs []*chain.Config, setupTests setupFn, containerManager *containers.Manager, upgradePlanFilePath string, forkHeight int64, preUpgradeFunc PreUpgradeFunc) *UpgradeConfigurer { t.Helper() return &UpgradeConfigurer{ baseConfigurer: baseConfigurer{ @@ -52,6 +55,7 @@ func NewUpgradeConfigurer(t *testing.T, chainConfigs []*chain.Config, setupTests }, forkHeight: forkHeight, upgradeJsonFilePath: upgradePlanFilePath, + preUpgradeFunc: preUpgradeFunc, } } @@ -151,6 +155,7 @@ func (uc *UpgradeConfigurer) CreatePreUpgradeState() error { firstNode.BankMultiSendFromNode(addresses, amountToSend.String()) } + uc.preUpgradeFunc(uc.chainConfigs) return nil } diff --git a/test/e2e/configurer/upgrade_test.go b/test/e2e/configurer/upgrade_test.go index b2acf999..dc1c3dc6 100644 --- a/test/e2e/configurer/upgrade_test.go +++ b/test/e2e/configurer/upgrade_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" "github.com/babylonlabs-io/babylon/test/e2e/configurer/config" "github.com/stretchr/testify/require" ) diff --git a/test/e2e/containers/containers.go b/test/e2e/containers/containers.go index cd09f1ae..62929b30 100644 --- a/test/e2e/containers/containers.go +++ b/test/e2e/containers/containers.go @@ -69,7 +69,7 @@ func (m *Manager) ExecTxCmd(t *testing.T, chainId string, nodeName string, comma // namely adding flags `--chain-id={chain-id} -b=block --yes --keyring-backend=test "--log_format=json"`, // and searching for `successStr` func (m *Manager) ExecTxCmdWithSuccessString(t *testing.T, chainId string, containerName string, command []string, successStr string) (bytes.Buffer, bytes.Buffer, error) { - allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "-b=sync", "--yes", "--keyring-backend=test", "--log_format=json", "--home=/home/babylon/babylondata"} + allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "--gas-prices=0.002ubbn", "-b=sync", "--yes", "--keyring-backend=test", "--log_format=json", "--home=/home/babylon/babylondata"} txCommand := append(command, allTxArgs...) 
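// Illustrative arithmetic (not part of this patch): with --gas-prices=0.002ubbn now appended to
// every e2e tx, a tx consuming G gas units needs at least 0.002*G ubbn in fees. For the 200,000
// gas limit used per message in test/e2e/initialization/node.go that is 400ubbn, so the flat
// 20,000ubbn fee it now sets on signed genesis txs is more than enough.
const exampleMinGasPrice, exampleGasLimit = 0.002, 200_000
var _ = exampleMinGasPrice * exampleGasLimit // = 400 (ubbn)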
return m.ExecCmd(t, containerName, txCommand, successStr) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index d5096131..372fe35f 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -19,18 +19,6 @@ func TestBTCTimestampingTestSuite(t *testing.T) { suite.Run(t, new(BTCTimestampingTestSuite)) } -// TestBTCTimestampingPhase2HermesTestSuite tests BTC timestamping phase 2 protocol end-to-end, -// with the Hermes relayer -func TestBTCTimestampingPhase2HermesTestSuite(t *testing.T) { - suite.Run(t, new(BTCTimestampingPhase2HermesTestSuite)) -} - -// TestBTCTimestampingPhase2RlyTestSuite tests BTC timestamping phase 2 protocol end-to-end, -// with the Go relayer -func TestBTCTimestampingPhase2RlyTestSuite(t *testing.T) { - suite.Run(t, new(BTCTimestampingPhase2RlyTestSuite)) -} - // TestBTCStakingTestSuite tests BTC staking protocol end-to-end func TestBTCStakingTestSuite(t *testing.T) { suite.Run(t, new(BTCStakingTestSuite)) diff --git a/test/e2e/initialization/config.go b/test/e2e/initialization/config.go index cb074552..31b876ac 100644 --- a/test/e2e/initialization/config.go +++ b/test/e2e/initialization/config.go @@ -50,7 +50,7 @@ type NodeConfig struct { const ( // common BabylonDenom = "ubbn" - MinGasPrice = "0.000" + MinGasPrice = "0.002" ValidatorWalletName = "val" BabylonOpReturnTag = "01020304" @@ -58,11 +58,11 @@ const ( BabylonBtcFinalizationPeriod = 4 // chainA ChainAID = "bbn-test-a" - BabylonBalanceA = 200000000000 + BabylonBalanceA = 3000000000000 StakeAmountA = 100000000000 // chainB ChainBID = "bbn-test-b" - BabylonBalanceB = 500000000000 + BabylonBalanceB = 5000000000000 StakeAmountB = 400000000000 EpochDuration = time.Second * 60 diff --git a/test/e2e/initialization/node.go b/test/e2e/initialization/node.go index b43ba62b..dea6df49 100644 --- a/test/e2e/initialization/node.go +++ b/test/e2e/initialization/node.go @@ -32,6 +32,7 @@ import ( "github.com/spf13/viper" babylonApp "github.com/babylonlabs-io/babylon/app" + appparams "github.com/babylonlabs-io/babylon/app/params" "github.com/babylonlabs-io/babylon/cmd/babylond/cmd" "github.com/babylonlabs-io/babylon/crypto/bls12381" "github.com/babylonlabs-io/babylon/privval" @@ -396,7 +397,7 @@ func (n *internalNode) signMsg(msgs ...sdk.Msg) (*sdktx.Tx, error) { } txBuilder.SetMemo(fmt.Sprintf("%s@%s:26656", n.nodeKey.ID(), n.moniker)) - txBuilder.SetFeeAmount(sdk.NewCoins()) + txBuilder.SetFeeAmount(sdk.NewCoins(sdk.NewCoin(appparams.DefaultBondDenom, math.NewInt(20000)))) txBuilder.SetGasLimit(uint64(200000 * len(msgs))) addr, err := n.keyInfo.GetAddress() diff --git a/test/e2e/software_upgrade_e2e_signet_launch_test.go b/test/e2e/software_upgrade_e2e_signet_launch_test.go index 4323f3af..c2e0fc9f 100644 --- a/test/e2e/software_upgrade_e2e_signet_launch_test.go +++ b/test/e2e/software_upgrade_e2e_signet_launch_test.go @@ -3,30 +3,78 @@ package e2e import ( "sort" + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/suite" "github.com/babylonlabs-io/babylon/app" - v1 "github.com/babylonlabs-io/babylon/app/upgrades/signetlaunch" + appparams "github.com/babylonlabs-io/babylon/app/params" + v1 "github.com/babylonlabs-io/babylon/app/upgrades/v1" btclighttypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" "github.com/babylonlabs-io/babylon/test/e2e/configurer" + "github.com/babylonlabs-io/babylon/test/e2e/configurer/chain" "github.com/babylonlabs-io/babylon/test/e2e/configurer/config" + "github.com/babylonlabs-io/babylon/test/e2e/util" ) type 
SoftwareUpgradeSignetLaunchTestSuite struct { suite.Suite - configurer *configurer.UpgradeConfigurer + configurer *configurer.UpgradeConfigurer + balancesBeforeUpgrade map[string]sdk.Coin } func (s *SoftwareUpgradeSignetLaunchTestSuite) SetupSuite() { s.T().Log("setting up e2e integration test suite...") var err error + s.balancesBeforeUpgrade = make(map[string]sdk.Coin) btcHeaderGenesis, err := app.SignetBtcHeaderGenesis(app.NewTmpBabylonApp().AppCodec()) s.NoError(err) - cfg, err := configurer.NewSoftwareUpgradeConfigurer(s.T(), true, config.UpgradeSignetLaunchFilePath, []*btclighttypes.BTCHeaderInfo{btcHeaderGenesis}) + tokenDistData, err := v1.LoadTokenDistributionFromData() + s.NoError(err) + + balanceToMintByAddr := make(map[string]int64) + for _, td := range tokenDistData.TokenDistribution { + balanceToMintByAddr[td.AddressSender] += td.Amount + balanceToMintByAddr[td.AddressReceiver] += 0 + } + + // func only runs right before the upgrade proposal is sent + preUpgradeFunc := func(chains []*chain.Config) { + node := chains[0].NodeConfigs[1] + uniqueAddrs := make(map[string]any) + + for addr, amountToMint := range balanceToMintByAddr { + uniqueAddrs[addr] = struct{}{} + if amountToMint <= 0 { + continue + } + + amountToSend := sdk.NewCoin(appparams.BaseCoinUnit, sdkmath.NewInt(amountToMint)) + node.BankSendFromNode(addr, amountToSend.String()) + } + + // needs to wait for a block to make sure the send tx was processed and + // it queries the real balances before upgrade. + node.WaitForNextBlock() + for addr := range uniqueAddrs { + balance, err := node.QueryBalance(addr, appparams.DefaultBondDenom) + s.NoError(err) + + s.balancesBeforeUpgrade[addr] = *balance + } + } + + cfg, err := configurer.NewSoftwareUpgradeConfigurer( + s.T(), + true, + config.UpgradeSignetLaunchFilePath, + []*btclighttypes.BTCHeaderInfo{btcHeaderGenesis}, + preUpgradeFunc, + ) s.NoError(err) s.configurer = cfg @@ -116,6 +164,30 @@ func (s *SoftwareUpgradeSignetLaunchTestSuite) TestUpgradeSignetLaunch() { finalityParamsFromData, err := v1.LoadFinalityParamsFromData(bbnApp.AppCodec()) s.NoError(err) - s.EqualValues(finalityParamsFromData, *finalityParams) + + // Verifies the balance differences were really executed + tokenDistData, err := v1.LoadTokenDistributionFromData() + s.NoError(err) + + balanceDiffByAddr := make(map[string]int64) + for _, td := range tokenDistData.TokenDistribution { + balanceDiffByAddr[td.AddressSender] -= td.Amount + balanceDiffByAddr[td.AddressReceiver] += td.Amount + } + + for addr, diff := range balanceDiffByAddr { + coinDiff := sdk.NewCoin(appparams.DefaultBondDenom, sdkmath.NewInt(util.Abs(diff))) + expectedBalance := s.balancesBeforeUpgrade[addr].Add(coinDiff) + if diff < 0 { + expectedBalance = s.balancesBeforeUpgrade[addr].Sub(coinDiff) + } + + balanceAfterUpgrade, err := n.QueryBalance(addr, appparams.DefaultBondDenom) + s.NoError(err) + + expBalance := expectedBalance.String() + actBalance := balanceAfterUpgrade.String() + s.Equal(expBalance, actBalance, "addr %s has different balances. 
Expected %s != %s Actual", addr, expBalance, actBalance) + } } diff --git a/test/e2e/upgrades/signet-launch.json b/test/e2e/upgrades/v1.json similarity index 87% rename from test/e2e/upgrades/signet-launch.json rename to test/e2e/upgrades/v1.json index 2bd5a9f3..282a018a 100644 --- a/test/e2e/upgrades/signet-launch.json +++ b/test/e2e/upgrades/v1.json @@ -4,9 +4,9 @@ "@type": "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade", "authority": "bbn10d07y265gmmuvt4z0w9aw880jnsr700jduz5f2", "plan": { - "name": "signet-launch", + "name": "v1", "time": "0001-01-01T00:00:00Z", - "height": "21", + "height": "52", "info": "Msg info", "upgraded_client_state": null } @@ -17,4 +17,4 @@ "title": "any title", "summary": "any summary", "expedited": false -} +} \ No newline at end of file diff --git a/test/e2e/util/math.go b/test/e2e/util/math.go new file mode 100644 index 00000000..1b8403e0 --- /dev/null +++ b/test/e2e/util/math.go @@ -0,0 +1,10 @@ +package util + +import "golang.org/x/exp/constraints" + +func Abs[T constraints.Integer](x T) T { + if x < 0 { + return -x + } + return x +} diff --git a/testutil/datagen/tendermint.go b/testutil/datagen/tendermint.go index 8a40fc27..42d5bd49 100644 --- a/testutil/datagen/tendermint.go +++ b/testutil/datagen/tendermint.go @@ -8,8 +8,6 @@ import ( cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" sdk "github.com/cosmos/cosmos-sdk/types" ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" - - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" ) func GenRandomTMHeader(r *rand.Rand, chainID string, height uint64) *cmtproto.Header { @@ -41,16 +39,6 @@ func GenRandomTMHeaderInfo(r *rand.Rand, chainID string, height uint64) *header. } } -func NewZCHeaderInfo(header *ibctmtypes.Header, clientID string) *zctypes.HeaderInfo { - return &zctypes.HeaderInfo{ - ClientId: clientID, - AppHash: header.Header.AppHash, - ChainId: header.Header.ChainID, - Time: header.Header.Time, - Height: uint64(header.Header.Height), - } -} - func WithCtxHeight(ctx sdk.Context, height uint64) sdk.Context { headerInfo := ctx.HeaderInfo() headerInfo.Height = int64(height) diff --git a/testutil/keeper/zoneconcierge.go b/testutil/keeper/zoneconcierge.go deleted file mode 100644 index 30ed2679..00000000 --- a/testutil/keeper/zoneconcierge.go +++ /dev/null @@ -1,108 +0,0 @@ -package keeper - -import ( - "testing" - - "cosmossdk.io/core/header" - "cosmossdk.io/log" - "cosmossdk.io/store" - "cosmossdk.io/store/metrics" - storetypes "cosmossdk.io/store/types" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/runtime" - sdk "github.com/cosmos/cosmos-sdk/types" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - capabilitykeeper "github.com/cosmos/ibc-go/modules/capability/keeper" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" - "github.com/stretchr/testify/require" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// zoneconciergeChannelKeeper is a stub of ChannelKeeper -type zoneconciergeChannelKeeper struct{} - -func 
(zoneconciergeChannelKeeper) GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) { - return channeltypes.Channel{}, false -} -func (zoneconciergeChannelKeeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) { - return 0, false -} -func (zoneconciergeChannelKeeper) SendPacket(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet ibcexported.PacketI) error { - return nil -} -func (zoneconciergeChannelKeeper) ChanCloseInit(ctx sdk.Context, portID, channelID string, chanCap *capabilitytypes.Capability) error { - return nil -} - -func (zoneconciergeChannelKeeper) GetAllChannels(ctx sdk.Context) []channeltypes.IdentifiedChannel { - return nil -} -func (zoneconciergeChannelKeeper) GetChannelClientState(ctx sdk.Context, portID, channelID string) (string, ibcexported.ClientState, error) { - return "", nil, nil -} - -// zoneconciergeportKeeper is a stub of PortKeeper -type zoneconciergePortKeeper struct{} - -func (zoneconciergePortKeeper) BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability { - return &capabilitytypes.Capability{} -} - -type zoneconciergeStoreQuerier struct{} - -func (zoneconciergeStoreQuerier) Query(req *storetypes.RequestQuery) (*storetypes.ResponseQuery, error) { - return &storetypes.ResponseQuery{ - ProofOps: &cmtcrypto.ProofOps{ - Ops: []cmtcrypto.ProofOp{ - cmtcrypto.ProofOp{}, - }, - }, - }, nil -} - -func ZoneConciergeKeeper(t testing.TB, btclcKeeper types.BTCLightClientKeeper, checkpointingKeeper types.CheckpointingKeeper, btccKeeper types.BtcCheckpointKeeper, epochingKeeper types.EpochingKeeper) (*keeper.Keeper, sdk.Context) { - logger := log.NewTestLogger(t) - storeKey := storetypes.NewKVStoreKey(types.StoreKey) - memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) - - db := dbm.NewMemDB() - stateStore := store.NewCommitMultiStore(db, logger, metrics.NewNoOpMetrics()) - stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) - stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) - require.NoError(t, stateStore.LoadLatestVersion()) - - registry := codectypes.NewInterfaceRegistry() - appCodec := codec.NewProtoCodec(registry) - capabilityKeeper := capabilitykeeper.NewKeeper(appCodec, storeKey, memStoreKey) - k := keeper.NewKeeper( - appCodec, - runtime.NewKVStoreService(storeKey), - nil, // TODO: mock this keeper - nil, // TODO: mock this keeper - zoneconciergeChannelKeeper{}, - zoneconciergePortKeeper{}, - nil, // TODO: mock this keeper - nil, // TODO: mock this keeper - btclcKeeper, - checkpointingKeeper, - btccKeeper, - epochingKeeper, - zoneconciergeStoreQuerier{}, - capabilityKeeper.ScopeToModule("ZoneconciergeScopedKeeper"), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - ctx := sdk.NewContext(stateStore, cmtproto.Header{}, false, logger) - ctx = ctx.WithHeaderInfo(header.Info{}) - - return k, ctx -} diff --git a/wasmbinding/wasm.go b/wasmbinding/wasm.go index 8896e0ec..1c0c6cb6 100644 --- a/wasmbinding/wasm.go +++ b/wasmbinding/wasm.go @@ -10,27 +10,27 @@ import ( bbn "github.com/babylonlabs-io/babylon/types" "github.com/babylonlabs-io/babylon/wasmbinding/bindings" lcKeeper "github.com/babylonlabs-io/babylon/x/btclightclient/keeper" + checkpointingkeeper "github.com/babylonlabs-io/babylon/x/checkpointing/keeper" epochingkeeper "github.com/babylonlabs-io/babylon/x/epoching/keeper" - zckeeper "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" sdk "github.com/cosmos/cosmos-sdk/types" ) type 
QueryPlugin struct { - epochingKeeper *epochingkeeper.Keeper - zcKeeper *zckeeper.Keeper - lcKeeper *lcKeeper.Keeper + epochingKeeper *epochingkeeper.Keeper + checkpointingkeeper *checkpointingkeeper.Keeper + lcKeeper *lcKeeper.Keeper } // NewQueryPlugin returns a reference to a new QueryPlugin. func NewQueryPlugin( ek *epochingkeeper.Keeper, - zcKeeper *zckeeper.Keeper, + ch *checkpointingkeeper.Keeper, lcKeeper *lcKeeper.Keeper, ) *QueryPlugin { return &QueryPlugin{ - epochingKeeper: ek, - zcKeeper: zcKeeper, - lcKeeper: lcKeeper, + epochingKeeper: ek, + checkpointingkeeper: ch, + lcKeeper: lcKeeper, } } @@ -55,8 +55,10 @@ func CustomQuerier(qp *QueryPlugin) func(ctx sdk.Context, request json.RawMessag } return bz, nil + case contractQuery.LatestFinalizedEpochInfo != nil: - epoch := qp.zcKeeper.GetLastFinalizedEpoch(ctx) + epoch := qp.checkpointingkeeper.GetLastFinalizedEpoch(ctx) + epochInfo, err := qp.epochingKeeper.GetHistoricalEpoch(ctx, epoch) if err != nil { @@ -153,10 +155,10 @@ func CustomQuerier(qp *QueryPlugin) func(ctx sdk.Context, request json.RawMessag func RegisterCustomPlugins( ek *epochingkeeper.Keeper, - zcKeeper *zckeeper.Keeper, + ck *checkpointingkeeper.Keeper, lcKeeper *lcKeeper.Keeper, ) []wasmkeeper.Option { - wasmQueryPlugin := NewQueryPlugin(ek, zcKeeper, lcKeeper) + wasmQueryPlugin := NewQueryPlugin(ek, ck, lcKeeper) queryPluginOpt := wasmkeeper.WithQueryPlugins(&wasmkeeper.QueryPlugins{ Custom: CustomQuerier(wasmQueryPlugin), diff --git a/x/epoching/keeper/drop_validator_msg_decorator.go b/x/epoching/keeper/drop_validator_msg_decorator.go index c6261b1e..7ab644b7 100644 --- a/x/epoching/keeper/drop_validator_msg_decorator.go +++ b/x/epoching/keeper/drop_validator_msg_decorator.go @@ -8,11 +8,11 @@ import ( // DropValidatorMsgDecorator defines an AnteHandler decorator that rejects all messages that might change the validator set. 
type DropValidatorMsgDecorator struct { - ek Keeper + ek *Keeper } // NewDropValidatorMsgDecorator creates a new DropValidatorMsgDecorator -func NewDropValidatorMsgDecorator(ek Keeper) *DropValidatorMsgDecorator { +func NewDropValidatorMsgDecorator(ek *Keeper) *DropValidatorMsgDecorator { return &DropValidatorMsgDecorator{ ek: ek, } diff --git a/x/epoching/keeper/drop_validator_msg_decorator_test.go b/x/epoching/keeper/drop_validator_msg_decorator_test.go index a6411e37..0880f946 100644 --- a/x/epoching/keeper/drop_validator_msg_decorator_test.go +++ b/x/epoching/keeper/drop_validator_msg_decorator_test.go @@ -24,7 +24,7 @@ func TestDropValidatorMsgDecorator(t *testing.T) { {&stakingtypes.MsgEditValidator{}, false}, } - decorator := NewDropValidatorMsgDecorator(Keeper{}) + decorator := NewDropValidatorMsgDecorator(&Keeper{}) for _, tc := range testCases { res := decorator.IsValidatorRelatedMsg(tc.msg) diff --git a/x/finality/client/cli/query.go b/x/finality/client/cli/query.go index da6ef936..1d1215c8 100644 --- a/x/finality/client/cli/query.go +++ b/x/finality/client/cli/query.go @@ -35,6 +35,8 @@ func GetQueryCmd(queryRoute string) *cobra.Command { cmd.AddCommand(CmdListBlocks()) cmd.AddCommand(CmdVotesAtHeight()) cmd.AddCommand(CmdListEvidences()) + cmd.AddCommand(CmdSigningInfo()) + cmd.AddCommand(CmdAllSigningInfo()) return cmd } @@ -244,3 +246,67 @@ func CmdListEvidences() *cobra.Command { return cmd } + +func CmdSigningInfo() *cobra.Command { + cmd := &cobra.Command{ + Use: "signing-info [fp-pk-hex]", + Short: "Show signing info of a given finality provider", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + fpPkHex := args[0] + + // query for the signing info of a given finality provider + res, err := queryClient.SigningInfo( + cmd.Context(), + &types.QuerySigningInfoRequest{FpBtcPkHex: fpPkHex}, + ) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdAllSigningInfo() *cobra.Command { + cmd := &cobra.Command{ + Use: "all-signing-info", + Short: "Show signing info of finality providers", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + // query for all the signing infos + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := queryClient.SigningInfos( + cmd.Context(), + &types.QuerySigningInfosRequest{Pagination: pageReq}, + ) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "all-signing-info") + + return cmd +} diff --git a/x/zoneconcierge/README.md b/x/zoneconcierge/README.md deleted file mode 100644 index 046c5567..00000000 --- a/x/zoneconcierge/README.md +++ /dev/null @@ -1,500 +0,0 @@ -# ZoneConcierge - -The Zone Concierge module is responsible for generating BTC timestamps of -headers from other PoS blockchains. These BTC timestamps allow PoS blockchains -integrating with Babylon to achieve Bitcoin security, i.e., forking the PoS -blockchain is as hard as forking Bitcoin. The Zone Concierge module leverages -the IBC protocol to receive PoS blockchains' headers and provide them with -succinct and provable information about their timestamps. 
- -There are two phases of integration for a PoS blockchain: - -- **Phase 1 integration:** Babylon receives PoS blockchain headers via standard - `MsgUpdateClient` messages in IBC light client protocol, timestamps them, and - functions as a canonical chain oracle for the PoS blockchain. - [Babylonscan](https://babylonscan.io/) shows PoS blockchains with phase 1 - integration. -- **Phase 2 integration:** In addition to phase 1, phase 2 allows a PoS - blockchain to receive BTC timestamps from Babylon via an IBC channel, such - that the PoS blockchain can use BTC timestamps to detect and resolve forks, as - well as other use cases such as Bitcoin-assisted fast unbonding. - -## Table of contents - -- [Table of contents](#table-of-contents) -- [Concepts](#concepts) - - [Problem Statement](#problem-statement) - - [Design](#design) - - [Use cases](#use-cases) -- [State](#state) - - [Parameters](#parameters) - - [ChainInfo](#chaininfo) - - [EpochChainInfo](#epochchaininfo) - - [CanonicalChain](#canonicalchain) - - [Fork](#fork) - - [Params](#params) -- [PostHandler for intercepting IBC headers](#posthandler-for-intercepting-ibc-headers) -- [Hooks](#hooks) - - [Indexing headers upon `AfterEpochEnds`](#indexing-headers-upon-afterepochends) - - [Sending BTC timestamps upon `AfterRawCheckpointFinalized`](#sending-btc-timestamps-upon-afterrawcheckpointfinalized) -- [Interaction with PoS blockchains under phase 1 integration](#interaction-with-pos-blockchains-under-phase-1-integration) -- [Interaction with PoS blockchains under phase 2 integration](#interaction-with-pos-blockchains-under-phase-2-integration) -- [Messages and Queries](#messages-and-queries) - -## Concepts - -The Zone Concierge module is responsible for providing BTC timestamps of headers -from PoS blockchains connected to Babylon via the IBC protocol. -These BTC timestamps allow PoS blockchains to achieve Bitcoin security, i.e., -forking a PoS blockchain is as hard as forking Bitcoin. The Zone Concierge -module leverages the IBC light client protocol to receive headers with a valid -quorum certificate from PoS blockchains. These headers are then timestamped -together with the Babylon blockchain by Bitcoin, thereby achieving Bitcoin -security. The BTC timestamps can be propagated back to the PoS blockchains, such -that PoS blockchains can know their headers that have been checkpointed by -Bitcoin. - -### Problem Statement - -Babylon aims to provide Bitcoin security to other PoS blockchains. This involves -two functionalities: 1) checkpointing Babylon to Bitcoin, and 2) checkpointing -other PoS blockchains to Babylon. The {[Epoching](../epoching/), -[Checkpointing](../checkpointing/), [BTCCheckpoint](../btccheckpoint/), -[BTCLightclient](../btclightclient/)} modules jointly provide the functionality -of checkpointing Babylon to Bitcoin. The [Zone Concierge module](./) and the -[IBC modules](https://github.com/cosmos/ibc-go) jointly provide the -functionality of checkpointing PoS blockchains to Babylon. - -In order to checkpoint PoS blockchains to Babylon, Babylon needs to receive -headers of PoS blockchains and maintain all headers that have a *quorum -certificate* (a set of signatures from validators with > 2/3 total voting -power). Checkpointing canonical headers allows Babylon to act as a canonical -chain oracle. Checkpointing fork headers allows Babylon to identify dishonest -majority attacks. 
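
To make the canonical-versus-fork distinction concrete, below is a minimal, hypothetical Go sketch (not the module's actual indexer; the type and function names are made up) that classifies an incoming quorum-certified header by comparing it against the header already indexed at the same height. A conflicting header at an already-indexed height is exactly the dishonest-majority signal described above.

```go
package main

import (
	"bytes"
	"fmt"
)

// indexedHeader is a simplified stand-in for the module's IndexedHeader:
// only the fields needed for the canonical/fork decision are kept.
type indexedHeader struct {
	Height uint64
	Hash   []byte
}

// classify reports whether a quorum-certified header extends the canonical
// chain or conflicts with an already-indexed header (i.e., evidences a fork).
// canonical maps height -> indexed canonical header.
func classify(canonical map[uint64]indexedHeader, h indexedHeader) string {
	prev, seen := canonical[h.Height]
	switch {
	case !seen:
		return "canonical (new height)"
	case bytes.Equal(prev.Hash, h.Hash):
		return "duplicate of canonical header"
	default:
		return "fork (conflicting quorum-certified header)"
	}
}

func main() {
	canonical := map[uint64]indexedHeader{
		10: {Height: 10, Hash: []byte{0xaa}},
	}
	fmt.Println(classify(canonical, indexedHeader{Height: 11, Hash: []byte{0xbb}})) // canonical (new height)
	fmt.Println(classify(canonical, indexedHeader{Height: 10, Hash: []byte{0xcc}})) // fork (conflicting quorum-certified header)
}
```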
- -To summarize, the Zone Concierge module aims at providing the following -guarantees: - -- **Timestamping headers:** Babylon checkpoints PoS blockchains' (canonical and - fork) headers with a valid quorum certificate. -- **Verifiability of timestamps:** Babylon can provide a proof that a given - header is checkpointed by Bitcoin, where the proof is publicly verifiable - assuming access to a BTC light client. - -Under the following assumptions: - -- BTC is always secure with the [k-deep confirmation - rule](https://en.bitcoin.it/wiki/Confirmation); -- There exists >=1 honest IBC relayer and vigilante {submitter, reporter}; and -- The network is synchronous (i.e., messages are delivered within a known and - finite time bound). - -Note that the Bitcoin timestamping protocol uses Bitcoin as a single source of -truth, and does not make any assumption on the fraction of adversarial -validators in Babylon or PoS blockchains. That is, the above statement shall -hold even if Babylon and a PoS blockchain have dishonest supermajority. The -formal security analysis of the Bitcoin timestamping protocol can be found at -the Bitcoin timestamping [reseaarch paper](https://arxiv.org/pdf/2207.08392.pdf) -published at [S\&P'23](https://sp2023.ieee-security.org/). - -### Design - -The Zone Concierge module is responsible for checkpointing headers from PoS -blockchains and propagating succinct and verifiable information about them back -to the PoS blockchains. Specifically, the Zone Concierge module - -- leverages IBC light clients for checkpointing PoS blockchains; -- intercepts and indexes headers from PoS blockchains; and -- provides BTC timestamps proving that a header is checkpointed by Babylon and - Bitcoin (via queries or IBC packets). - -**Leveraging IBC light clients for checkpointing PoS blockchains.** Babylon -leverages the [IBC light client -protocol](https://github.com/cosmos/ibc/tree/main/spec/client/ics-007-tendermint-client) -to receive and verify headers of PoS blockchains. The IBC light client protocol -allows a blockchain `A` to maintain a *light client* of another blockchain `B`. -The light client contains a subset of headers in the ledger of blockchain `B`, -securing the following properties when blockchain `B` has more than 2/3 honest -voting power and there exists at least 1 honest IBC relayer. - -- **Safety:** The IBC light client in blockchain `A` is consistent with the - ledger of blockchain `B`. -- **Liveness:** The IBC light client in blockchain `A` keeps growing. - -Verifying a header is done by a special [quorum intersection -mechanism](https://arxiv.org/abs/2010.07031): upon a header from the relayer, -the light client checks whether the intersected voting power between the quorum -certificates of the current tip and the header is more than 1/3 of the voting -power in the current tip. If yes, then this ensures that there exists at least -one honest validator in the header's quorum certificate, and this header is -agreed by all honest validators. - -Babylon leverages the IBC light client protocol to checkpoint PoS blockchains to -itself. In particular, each header with a valid quorum certificate can be viewed -as a timestamp, and Babylon can generate an inclusion proof that a given header -of a PoS blockchain is committed to Babylon's `AppHash`. 
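
As a rough illustration of the quorum-intersection rule referenced above, here is a toy Go sketch; the real check lives in the IBC 07-tendermint light client, not in this module, and the validator names and powers below are invented.

```go
package main

import "fmt"

// trustedByIntersection applies the 1/3 quorum-intersection rule: a new header is
// accepted if the validators that signed BOTH the current tip and the new header
// hold strictly more than 1/3 of the tip's total voting power, which guarantees
// at least one honest validator signed the new header.
//
// tipPower maps validator -> voting power in the current tip's validator set;
// tipSigners and newSigners list the signers in each quorum certificate.
func trustedByIntersection(tipPower map[string]int64, tipSigners, newSigners []string) bool {
	inTip := make(map[string]bool, len(tipSigners))
	for _, v := range tipSigners {
		inTip[v] = true
	}

	var total, intersected int64
	for _, p := range tipPower {
		total += p
	}
	for _, v := range newSigners {
		if inTip[v] {
			intersected += tipPower[v]
		}
	}
	// strictly more than 1/3 of the tip's total voting power
	return 3*intersected > total
}

func main() {
	power := map[string]int64{"val1": 10, "val2": 10, "val3": 10}
	tipSigners := []string{"val1", "val2", "val3"}
	fmt.Println(trustedByIntersection(power, tipSigners, []string{"val1", "val2"})) // true  (20/30 > 1/3)
	fmt.Println(trustedByIntersection(power, tipSigners, []string{"val3"}))         // false (10/30 is not strictly > 1/3)
}
```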
- -**Intercepting and Indexing Headers from PoS blockchains.** In order to further -checkpoint headers of PoS blockchains to Bitcoin, the Zone Concierge module -builds an index recording headers' positions on Babylon's ledger, which will -eventually be checkpointed by Bitcoin. To this end, the Zone Concierge module -intercepts headers from IBC light clients via a -[PostHandler](https://docs.cosmos.network/v0.50/learn/advanced/baseapp#runtx-antehandler-runmsgs-posthandler), -and indexes them. - -Note that the Zone Concierge module intercepts all headers that have a valid -quorum certificate, including canonical headers and fork headers. A fork header -with a valid quorum certificate is a signal of the dishonest majority attack: -the majority of validators are dishonest and sign conflicted headers. - -**Providing Proofs that a Header is Checkpointed by Bitcoin.** To support use -cases that need to verify BTC timestamps of headers, Zone Concierge can provide -proofs that the headers are indeed checkpointed to Bitcoin. The proof includes -the following: - -- `ProofCzHeaderInEpoch`: Proof that the header of the PoS blockchain is - included in an epoch of Babylon; -- `ProofEpochSealed`: Proof that the epoch has been agreed by > 2/3 voting power - of the validator set; and -- `ProofEpochSubmitted`: Proof that the epoch's checkpoint has been submitted to - Bitcoin. - -The first proof is formed as a Merkle proof that the IBC header is committed to -the `AppHash` after the epoch. The second proof is formed as a BLS -multi-signature jointly generated by the epoch's validator set. The last proof -is formed as Merkle proofs of two transactions that constitute a BTC checkpoint, -same as in [BTCCheckpoint module](../btccheckpoint/README.md). - -### Use cases - -The Bitcoin-checkpointed PoS blockchain will enable several applications, such -as raising alarms upon dishonest majority attacks and reducing the unbonding -period. These use cases require new plugins in the PoS blockchains, and will be -developed by Babylon team in the future. - -**Raising Alarms upon Dishonest Majority Attacks.** Zone Concierge timestamps -fork headers that have valid quorum certificates. Such fork header signals a -safety attack launched by the dishonest majority of validators. Babylon can send -the fork header back to the corresponding PoS blockchain, such that the PoS -blockchain will get notified with this dishonest majority attack, and can decide -to stall or initiate a social consensus. - -**Reducing Unbonding Period.** Zone Concierge provides a Bitcoin-checkpointed -prefix for a PoS blockchain. Such Bitcoin-checkpointed prefix resists against -the long range attacks, thus unbonding requests in this prefix can be safely -finished, leading to much shorter unbonding period compared to that in existing -PoS blockchains (e.g., 21 days in Cosmos SDK chains). - -## State - -The Zone Concierge module keeps handling IBC headers of PoS blockchains, and -maintains the following KV stores. - -### Parameters - -The [parameter storage](./keeper/params.go) maintains Zone Concierge module's -parameters. The Zone Concierge module's parameters are represented as a `Params` -[object](../../proto/babylon/zoneconcierge/v1/params.proto) defined as follows: - -```protobuf -// Params defines the parameters for the module. 
-message Params { - option (gogoproto.equal) = true; - - // ibc_packet_timeout_seconds is the time period after which an unrelayed - // IBC packet becomes timeout, measured in seconds - uint32 ibc_packet_timeout_seconds = 1 - [ (gogoproto.moretags) = "yaml:\"ibc_packet_timeout_seconds\"" ]; -} -``` - -### ChainInfo - -The [chain info storage](./keeper/chain_info_indexer.go) maintains `ChainInfo` -for each PoS blockchain. The key is the PoS blockchain's `ConsumerID`, which is the -ID of the IBC light client. The value is a `ChainInfo` object. The `ChainInfo` is -a structure storing the information of a PoS blockchain that checkpoints to Babylon. - -```protobuf -// ChainInfo is the information of a CZ -message ChainInfo { - // chain_id is the ID of the chain - string chain_id = 1; - // latest_header is the latest header in CZ's canonical chain - IndexedHeader latest_header = 2; - // latest_forks is the latest forks, formed as a series of IndexedHeader (from - // low to high) - Forks latest_forks = 3; - // timestamped_headers_count is the number of timestamped headers in CZ's - // canonical chain - uint64 timestamped_headers_count = 4; -} -``` - -### EpochChainInfo - -The [epoch chain info storage](./keeper/epoch_chain_info_indexer.go) maintains -`ChainInfo` at the end of each Babylon epoch for each PoS blockchain. The key is -the PoS blockchain's `ConsumerID` plus the epoch number, and the value is a -`ChainInfo` object. - -### CanonicalChain - -The [canonical chain storage](./keeper/canonical_chain_indexer.go) maintains the -metadata of canonical IBC headers of a PoS blockchain. The key is the consumer -chain's `ConsumerID` plus the height, and the value is a `IndexedHeader` object. -`IndexedHeader` is a structure storing IBC header's metadata. - -```protobuf -// IndexedHeader is the metadata of a CZ header -message IndexedHeader { - // chain_id is the unique ID of the chain - string chain_id = 1; - // hash is the hash of this header - bytes hash = 2; - // height is the height of this header on CZ ledger - // (hash, height) jointly provides the position of the header on CZ ledger - uint64 height = 3; - // time is the timestamp of this header on CZ ledger - // it is needed for CZ to unbond all mature validators/delegations - // before this timestamp when this header is BTC-finalized - google.protobuf.Timestamp time = 4 [ (gogoproto.stdtime) = true ]; - // babylon_header_hash is the hash of the babylon block that includes this CZ - // header - bytes babylon_header_hash = 5; - // babylon_header_height is the height of the babylon block that includes this CZ - // header - uint64 babylon_header_height = 6; - // epoch is the epoch number of this header on Babylon ledger - uint64 babylon_epoch = 7; - // babylon_tx_hash is the hash of the tx that includes this header - // (babylon_block_height, babylon_tx_hash) jointly provides the position of - // the header on Babylon ledger - bytes babylon_tx_hash = 8; -} -``` - -### Fork - -The [fork storage](./keeper/fork_indexer.go) maintains the metadata of canonical -IBC headers of a PoS blockchain. The key is the PoS blockchain's `ConsumerID` plus -the height, and the value is a list of `IndexedHeader` objects, which represent -fork headers at that height. - -### Params - -The [parameter storage](./keeper/params.go) maintains the parameters for the -Zone Concierge module. - -```protobuf -// Params defines the parameters for the module. 
-message Params { - option (gogoproto.equal) = true; - - // ibc_packet_timeout_seconds is the time period after which an unrelayed - // IBC packet becomes timeout, measured in seconds - uint32 ibc_packet_timeout_seconds = 1 - [ (gogoproto.moretags) = "yaml:\"ibc_packet_timeout_seconds\"" ]; -} -``` - -## PostHandler for intercepting IBC headers - -The Zone Concierge module implements a -[PostHandler](https://docs.cosmos.network/v0.50/learn/advanced/baseapp#runtx-antehandler-runmsgs-posthandler) -`IBCHeaderDecorator` to intercept headers sent to the [IBC client -module](https://github.com/cosmos/ibc-go/tree/v8.0.0/modules/core/02-client). -The `IBCHeaderDecorator` PostHandler is defined at -[x/zoneconcierge/keeper/header_handler.go](./keeper/header_handler.go), and -works as follows. - -1. If the PoS blockchain hosting the header is not known to Babylon, initialize - `ChainInfo` storage for the PoS blockchain. -2. If the header is on a fork, insert the header to the fork storage and update - `ChainInfo`. -3. If the header is canonical, insert the header to the canonical chain storage - and update `ChainInfo`. - -## Hooks - -The Zone Concierge module subscribes to the Epoching module's `AfterEpochEnds` -[hook](../epoching/types/hooks.go) for indexing the epochs when receiving -headers from PoS blockchains, and the Checkpointing module's -`AfterRawCheckpointFinalized` [hook](../checkpointing/types/hooks.go) for phase -2 integration. - -### Indexing headers upon `AfterEpochEnds` - -The `AfterEpochEnds` hook is triggered upon an epoch is ended, i.e., the last -block in this epoch has been committed by CometBFT. Upon `AfterEpochEnds`, the -Zone Concierge will save the current `ChainInfo` to the `EpochChainInfo` storage -for each PoS blockchain. - -### Sending BTC timestamps upon `AfterRawCheckpointFinalized` - -The `AfterRawCheckpointFinalized` hook is triggered upon a checkpoint becoming -*finalized*, i.e., Bitcoin transactions of the checkpoint become `w`-deep in -Bitcoin's canonical chain, where `w` is the `checkpoint_finalization_timeout` -[parameter](../../proto/babylon/btccheckpoint/v1/params.proto) in the -[BTCCheckpoint](../btccheckpoint/) module. - -Upon `AfterRawCheckpointFinalized`, the Zone Concierge module will prepare and -send a BTC timestamp to each PoS blockchain. -The [BTCTimestamp](../../proto/babylon/zoneconcierge/v1/packet.proto) structure -includes a header and a set of proofs that the header is checkpointed by -Bitcoin. - - - -```protobuf -// BTCTimestamp is a BTC timestamp that carries information of a BTC-finalized epoch -// It includes a number of BTC headers, a raw checkpoint, an epoch metadata, and -// a CZ header if there exists CZ headers checkpointed to this epoch. -// Upon a newly finalized epoch in Babylon, Babylon will send a BTC timestamp to each -// PoS blockchain that has phase-2 integration with Babylon via IBC. 
-message BTCTimestamp { - // header is the last CZ header in the finalized Babylon epoch - babylon.zoneconcierge.v1.IndexedHeader header = 1; - - /* - Data for BTC light client - */ - // btc_headers is BTC headers between - // - the block AFTER the common ancestor of BTC tip at epoch `lastFinalizedEpoch-1` and BTC tip at epoch `lastFinalizedEpoch` - // - BTC tip at epoch `lastFinalizedEpoch` - // where `lastFinalizedEpoch` is the last finalized epoch in Babylon - repeated babylon.btclightclient.v1.BTCHeaderInfo btc_headers = 2; - - /* - Data for Babylon epoch chain - */ - // epoch_info is the metadata of the sealed epoch - babylon.epoching.v1.Epoch epoch_info = 3; - // raw_checkpoint is the raw checkpoint that seals this epoch - babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 4; - // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch - babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 5; - - /* - Proofs that the header is finalized - */ - babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 6; -} - -// ProofFinalizedChainInfo is a set of proofs that attest a chain info is -// BTC-finalized -message ProofFinalizedChainInfo { - /* - The following fields include proofs that attest the chain info is - BTC-finalized - */ - // proof_cz_header_in_epoch is the proof that the CZ header is timestamped - // within a certain epoch - tendermint.crypto.ProofOps proof_cz_header_in_epoch = 1; - // proof_epoch_sealed is the proof that the epoch is sealed - babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 2; - // proof_epoch_submitted is the proof that the epoch's checkpoint is included - // in BTC ledger It is the two TransactionInfo in the best (i.e., earliest) - // checkpoint submission - repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 3; -} -``` - -Upon `AfterRawCheckpointFinalized` is triggered, the Zone Concierge module will -send an IBC packet including a `BTCTimestamp` to each PoS blockchain doing -[phase 2 -integration](#interaction-with-pos-blockchains-under-phase-2-integration) with -Babylon. The logic is defined at -[x/zoneconcierge/keeper/hooks.go](./keeper/hooks.go) and works as follows. - -1. Find all open IBC channels with Babylon's Zone Concierge module. The - counterparty at each IBC channel is a PoS blockchain. -2. Get all BTC headers to be sent in BTC timestamps. Specifically, - 1. Find the segment of BTC headers sent upon the last time - `AfterRawCheckpointFinalized` is triggered. - 2. If all BTC headers in the segment are no longer canonical, the BTC headers - to be sent will be the last `w+1` ones in the BTC light client, where `w` - is the `checkpoint_finalization_timeout` - [parameter](../../proto/babylon/btccheckpoint/v1/params.proto) in the - [BTCCheckpoint](../btccheckpoint/) module. - 3. Otherwise, the BTC headers to be sent will be from the latest header that - is still canonical in the segment to the current tip of the BTC light - client. -3. For each of these IBC channels: - 1. Find the `ConsumerID` of the counterparty chain (i.e., the PoS blockchain) in - the IBC channel. - 2. Get the `ChainInfo` of the `ConsumerID` at the last finalized epoch. - 3. Get the metadata of the last finalized epoch and its corresponding raw - checkpoint. - 4. Generate the proof that the last PoS blockchain's canonical header is - committed to the epoch's metadata. - 5. 
Generate the proof that the epoch is sealed, i.e., receives a BLS - multisignature generated by validators with >2/3 total voting power at the - last finalized epoch. - 6. Generate the proof that the epoch's checkpoint is submitted, i.e., encoded - in transactions on Bitcoin. - 7. Assemble all the above and the BTC headers obtained in step 2 as - `BTCTimestamp`, and send it to the IBC channel in an IBC packet. - -## Interaction with PoS blockchains under phase 1 integration - - - - -In phase 1 integration, Babylon maintains headers for a PoS blockchain via the -IBC light client protocol. The IBC light client of the PoS blockchain is -checkpointed by Bitcoin via Babylon, thus achieves Bitcoin security. - -Babylon utilizes the [IBC light client -protocol](https://github.com/cosmos/ibc/tree/main/spec/client/ics-007-tendermint-client) -for receiving headers from other PoS blockchains. The IBC headers are -encapsulated in the IBC protocol's `MsgUpdateClient` -[messages](https://github.com/cosmos/ibc-go/blob/v8.0.0/proto/ibc/core/client/v1/tx.proto#L20-L21), -and are sent to the [IBC client -module](https://github.com/cosmos/ibc-go/tree/v8.0.0/modules/core/02-client) by -an [IBC -relayer](https://github.com/cosmos/ibc/blob/main/spec/relayer/ics-018-relayer-algorithms/README.md). -The `IBCHeaderDecorator` PostHandler intercepts the headers and indexes their -positions in the `ChainInfo` storage, as per -[here](#indexing-headers-upon-afterepochends). This effectively checkpoints the -headers of PoS blockchains, completing the phase 1 integration. - -## Interaction with PoS blockchains under phase 2 integration - - - -In phase 2 integration, Babylon does everything in phase 1, and will send BTC -timestamps of headers back to each PoS blockchain. Each PoS blockchain can -verify the BTC timestamp and ensure that each header is finalized by Bitcoin, -thus obtaining Bitcoin security. The BTC timestamps can be used by the PoS -blockchain -for different use cases, e.g., BTC-assisted unbonding. - -The phase 2 integration does not require any change to the PoS blockchain's -code. Rather, it only needs to deploy a [Babylon -contract](https://github.com/babylonlabs-io/babylon-contract) on the PoS -blockchain, and start an IBC relayer between Babylon and the Babylon contract on -the PoS blockchain. The Babylon contract can be deployed to a blockchain -supporting [CosmWasm](https://github.com/CosmWasm/cosmwasm) smart contracts, -connects with Babylon's Zone Concierge module via an IBC channel, and receives -BTC timestamps from Babylon to help the PoS blockchain get Bitcoin security. - -Upon a Babylon epoch becoming finalized, i.e., upon -`AfterRawCheckpointFinalized` is triggered, Babylon will send an IBC packet -including a `BTCTimestamp` to each PoS blockchain doing phase 2/3 integration -with Babylon, as per -[here](#sending-btc-timestamps-upon-afterrawcheckpointfinalized). - -Note that Zone Concierge provides 1-to-all connection, where the Zone Concierge -module establishes an IBC channel with each of multiple consumer chains. Zone -Concierge will send an BTC timestamp to each of these consumer chains upon an -epoch is finalized. - -## Messages and Queries - -The Zone Concierge module only has one message `MsgUpdateParams` for updating -the module parameters via a governance proposal. - -It provides a set of queries about the status of checkpointed PoS blockchains, -listed at -[docs.babylonchain.io](https://docs.babylonchain.io/docs/developer-guides/grpcrestapi#tag/ZoneConcierge). 
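
The header-selection rule in step 2 of the timestamp-broadcast flow above is easy to get wrong, so a standalone sketch may help. This is a hypothetical helper with made-up types (`btcHeader`, `selectHeadersToSend`), not the removed x/zoneconcierge hooks code; it only mirrors the rule stated in the list: fall back to the last `w+1` headers when the previous segment was fully reorged out, otherwise resend from the latest still-canonical header to the tip.

```go
package main

import "fmt"

// btcHeader is a minimal stand-in for the light client's BTCHeaderInfo.
type btcHeader struct {
	Hash   string
	Height uint32
}

// selectHeadersToSend picks the BTC headers for the next BTC timestamp:
//   - lastSegment: headers included in the previous timestamp
//   - canonical:   the current canonical BTC chain, ascending by height
//   - w:           checkpoint_finalization_timeout
func selectHeadersToSend(lastSegment, canonical []btcHeader, w int) []btcHeader {
	index := make(map[string]int, len(canonical)) // hash -> position in the canonical chain
	for i, h := range canonical {
		index[h.Hash] = i
	}

	start := -1
	for i := len(lastSegment) - 1; i >= 0; i-- { // walk the old segment from its tip backwards
		if idx, ok := index[lastSegment[i].Hash]; ok {
			start = idx
			break
		}
	}
	if start < 0 { // the whole previous segment was reorged out: send the last w+1 headers
		if len(canonical) <= w+1 {
			return canonical
		}
		return canonical[len(canonical)-(w+1):]
	}
	return canonical[start:] // from the latest still-canonical header to the tip
}

func main() {
	canonical := []btcHeader{{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}}
	prev := []btcHeader{{"b", 2}, {"x", 3}} // "x" was reorged out, "b" is still canonical
	fmt.Println(selectHeadersToSend(prev, canonical, 2)) // [{b 2} {c 3} {d 4} {e 5}]
}
```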
diff --git a/x/zoneconcierge/abci.go b/x/zoneconcierge/abci.go deleted file mode 100644 index a0989f67..00000000 --- a/x/zoneconcierge/abci.go +++ /dev/null @@ -1,23 +0,0 @@ -package zoneconcierge - -import ( - "context" - "time" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cosmos/cosmos-sdk/telemetry" -) - -// BeginBlocker sends a pending packet for every channel upon each new block, -// so that the relayer is kept awake to relay headers -func BeginBlocker(ctx context.Context, k keeper.Keeper) error { - defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker) - return nil -} - -func EndBlocker(ctx context.Context, k keeper.Keeper) ([]abci.ValidatorUpdate, error) { - defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyEndBlocker) - return []abci.ValidatorUpdate{}, nil -} diff --git a/x/zoneconcierge/client/cli/query.go b/x/zoneconcierge/client/cli/query.go deleted file mode 100644 index 7e55d19e..00000000 --- a/x/zoneconcierge/client/cli/query.go +++ /dev/null @@ -1,105 +0,0 @@ -package cli - -import ( - "fmt" - "strconv" - - "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/spf13/cobra" - - "github.com/cosmos/cosmos-sdk/client" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// GetQueryCmd returns the cli query commands for this module -func GetQueryCmd(queryRoute string) *cobra.Command { - // Group zoneconcierge queries under a subcommand - cmd := &cobra.Command{ - Use: types.ModuleName, - Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), - DisableFlagParsing: true, - SuggestionsMinimumDistance: 2, - RunE: client.ValidateCmd, - } - - cmd.AddCommand(CmdChainsInfo()) - cmd.AddCommand(CmdFinalizedChainsInfo()) - cmd.AddCommand(CmdEpochChainsInfoInfo()) - return cmd -} - -func CmdChainsInfo() *cobra.Command { - cmd := &cobra.Command{ - Use: "chains-info ", - Short: "retrieve the latest info for a given list of consumers", - Args: cobra.ArbitraryArgs, - RunE: func(cmd *cobra.Command, args []string) error { - clientCtx := client.GetClientContextFromCmd(cmd) - queryClient := types.NewQueryClient(clientCtx) - req := types.QueryChainsInfoRequest{ConsumerIds: args} - resp, err := queryClient.ChainsInfo(cmd.Context(), &req) - if err != nil { - return err - } - - return clientCtx.PrintProto(resp) - }, - } - - flags.AddQueryFlagsToCmd(cmd) - return cmd -} - -func CmdFinalizedChainsInfo() *cobra.Command { - cmd := &cobra.Command{ - Use: "finalized-chains-info ", - Short: "retrieve the finalized info for a given list of consumers", - Args: cobra.ArbitraryArgs, - RunE: func(cmd *cobra.Command, args []string) error { - prove, _ := cmd.Flags().GetBool("prove") - - clientCtx := client.GetClientContextFromCmd(cmd) - queryClient := types.NewQueryClient(clientCtx) - req := types.QueryFinalizedChainsInfoRequest{ConsumerIds: args, Prove: prove} - resp, err := queryClient.FinalizedChainsInfo(cmd.Context(), &req) - if err != nil { - return err - } - - return clientCtx.PrintProto(resp) - }, - } - - cmd.Flags().Bool("prove", false, "whether to retrieve proofs for each FinalizedChainInfo") - flags.AddQueryFlagsToCmd(cmd) - - return cmd -} - -func CmdEpochChainsInfoInfo() *cobra.Command { - cmd := &cobra.Command{ - Use: "epoch-chains-info ", - Short: "retrieve the latest info for a list of consumers in a given epoch", - Args: cobra.MinimumNArgs(1), - 
RunE: func(cmd *cobra.Command, args []string) error { - clientCtx := client.GetClientContextFromCmd(cmd) - queryClient := types.NewQueryClient(clientCtx) - - epoch, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return err - } - req := types.QueryEpochChainsInfoRequest{EpochNum: epoch, ConsumerIds: args[1:]} - resp, err := queryClient.EpochChainsInfo(cmd.Context(), &req) - if err != nil { - return err - } - - return clientCtx.PrintProto(resp) - }, - } - - flags.AddQueryFlagsToCmd(cmd) - return cmd -} diff --git a/x/zoneconcierge/client/cli/tx.go b/x/zoneconcierge/client/cli/tx.go deleted file mode 100644 index 23bd93b9..00000000 --- a/x/zoneconcierge/client/cli/tx.go +++ /dev/null @@ -1,23 +0,0 @@ -package cli - -import ( - "fmt" - "github.com/spf13/cobra" - - "github.com/cosmos/cosmos-sdk/client" - // "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// GetTxCmd returns the transaction commands for this module -func GetTxCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: types.ModuleName, - Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), - DisableFlagParsing: true, - SuggestionsMinimumDistance: 2, - RunE: client.ValidateCmd, - } - - return cmd -} diff --git a/x/zoneconcierge/genesis.go b/x/zoneconcierge/genesis.go deleted file mode 100644 index 84771473..00000000 --- a/x/zoneconcierge/genesis.go +++ /dev/null @@ -1,37 +0,0 @@ -package zoneconcierge - -import ( - "context" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// InitGenesis initializes the module's state from a provided genesis state. -func InitGenesis(ctx context.Context, k keeper.Keeper, genState types.GenesisState) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - // set params for this module - if err := k.SetParams(ctx, genState.Params); err != nil { - panic(err) - } - - k.SetPort(ctx, genState.PortId) - // Only try to bind to port if it is not already bound, since we may already own - // port capability from capability InitGenesis - if !k.IsBound(sdkCtx, genState.PortId) { - // module binds to the port on InitChain - // and claims the returned capability - err := k.BindPort(sdkCtx, genState.PortId) - if err != nil { - panic("could not claim port capability: " + err.Error()) - } - } -} - -// ExportGenesis returns the module's exported genesis -func ExportGenesis(ctx context.Context, k keeper.Keeper) *types.GenesisState { - genesis := types.DefaultGenesis() - genesis.Params = k.GetParams(ctx) - genesis.PortId = k.GetPort(ctx) - return genesis -} diff --git a/x/zoneconcierge/genesis_test.go b/x/zoneconcierge/genesis_test.go deleted file mode 100644 index e4b3181e..00000000 --- a/x/zoneconcierge/genesis_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package zoneconcierge_test - -import ( - "testing" - - keepertest "github.com/babylonlabs-io/babylon/testutil/keeper" - "github.com/babylonlabs-io/babylon/testutil/nullify" - "github.com/babylonlabs-io/babylon/x/zoneconcierge" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/stretchr/testify/require" -) - -func TestGenesis(t *testing.T) { - genesisState := types.GenesisState{ - PortId: types.PortID, - Params: types.Params{IbcPacketTimeoutSeconds: 100}, - } - - k, ctx := keepertest.ZoneConciergeKeeper(t, nil, nil, nil, nil) - zoneconcierge.InitGenesis(ctx, *k, genesisState) - got := zoneconcierge.ExportGenesis(ctx, *k) - require.NotNil(t, got) - - 
nullify.Fill(&genesisState) - nullify.Fill(got) - - require.Equal(t, genesisState.PortId, got.PortId) - require.Equal(t, genesisState.Params, got.Params) -} diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer.go b/x/zoneconcierge/keeper/canonical_chain_indexer.go deleted file mode 100644 index 253c05c5..00000000 --- a/x/zoneconcierge/keeper/canonical_chain_indexer.go +++ /dev/null @@ -1,74 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - "github.com/cosmos/cosmos-sdk/runtime" - - sdkerrors "cosmossdk.io/errors" - "cosmossdk.io/store/prefix" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// FindClosestHeader finds the IndexedHeader that is closest to (but not after) the given height -func (k Keeper) FindClosestHeader(ctx context.Context, consumerID string, height uint64) (*types.IndexedHeader, error) { - chainInfo, err := k.GetChainInfo(ctx, consumerID) - if err != nil { - return nil, fmt.Errorf("failed to get chain info for chain with ID %s: %w", consumerID, err) - } - - // if the given height is no lower than the latest header, return the latest header directly - if chainInfo.LatestHeader.Height <= height { - return chainInfo.LatestHeader, nil - } - - // the requested height is lower than the latest header, trace back until finding a timestamped header - store := k.canonicalChainStore(ctx, consumerID) - heightBytes := sdk.Uint64ToBigEndian(height) - iter := store.ReverseIterator(nil, heightBytes) - defer iter.Close() - // if there is no key within range [0, height], return error - if !iter.Valid() { - return nil, fmt.Errorf("chain with ID %s does not have a timestamped header before height %d", consumerID, height) - } - // find the header in bytes, decode and return - headerBytes := iter.Value() - var header types.IndexedHeader - k.cdc.MustUnmarshal(headerBytes, &header) - return &header, nil -} - -func (k Keeper) GetHeader(ctx context.Context, consumerID string, height uint64) (*types.IndexedHeader, error) { - store := k.canonicalChainStore(ctx, consumerID) - heightBytes := sdk.Uint64ToBigEndian(height) - if !store.Has(heightBytes) { - return nil, types.ErrHeaderNotFound - } - headerBytes := store.Get(heightBytes) - var header types.IndexedHeader - k.cdc.MustUnmarshal(headerBytes, &header) - return &header, nil -} - -func (k Keeper) insertHeader(ctx context.Context, consumerID string, header *types.IndexedHeader) error { - if header == nil { - return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") - } - // NOTE: we can accept header without ancestor since IBC connection can be established at any height - store := k.canonicalChainStore(ctx, consumerID) - store.Set(sdk.Uint64ToBigEndian(header.Height), k.cdc.MustMarshal(header)) - return nil -} - -// canonicalChainStore stores the canonical chain of a CZ, formed as a list of IndexedHeader -// prefix: CanonicalChainKey || consumerID -// key: height -// value: IndexedHeader -func (k Keeper) canonicalChainStore(ctx context.Context, consumerID string) prefix.Store { - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - canonicalChainStore := prefix.NewStore(storeAdapter, types.CanonicalChainKey) - consumerIDBytes := []byte(consumerID) - return prefix.NewStore(canonicalChainStore, consumerIDBytes) -} diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go deleted file mode 100644 index c13a7bed..00000000 --- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go 
+++ /dev/null @@ -1,85 +0,0 @@ -package keeper_test - -import ( - "math/rand" - "testing" - - "github.com/babylonlabs-io/babylon/app" - "github.com/babylonlabs-io/babylon/testutil/datagen" - "github.com/stretchr/testify/require" -) - -func FuzzCanonicalChainIndexer(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - czConsumerId := "test-consumerid" - - // simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 1 - headers := SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, 0, numHeaders) - - // check if the canonical chain index is correct or not - for i := uint64(0); i < numHeaders; i++ { - header, err := zcKeeper.GetHeader(ctx, czConsumerId, i) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, czConsumerId, header.ConsumerId) - require.Equal(t, i, header.Height) - require.Equal(t, headers[i].Header.AppHash, header.Hash) - } - - // check if the chain info is updated or not - chainInfo, err := zcKeeper.GetChainInfo(ctx, czConsumerId) - require.NoError(t, err) - require.NotNil(t, chainInfo.LatestHeader) - require.Equal(t, czConsumerId, chainInfo.LatestHeader.ConsumerId) - require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height) - require.Equal(t, headers[numHeaders-1].Header.AppHash, chainInfo.LatestHeader.Hash) - }) -} - -func FuzzFindClosestHeader(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - czConsumerId := "test-consumerid" - - // no header at the moment, FindClosestHeader invocation should give error - _, err := zcKeeper.FindClosestHeader(ctx, czConsumerId, 100) - require.Error(t, err) - - // simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 1 - headers := SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, 0, numHeaders) - - header, err := zcKeeper.FindClosestHeader(ctx, czConsumerId, numHeaders) - require.NoError(t, err) - require.Equal(t, headers[len(headers)-1].Header.AppHash, header.Hash) - - // skip a non-zero number of headers in between, in order to create a gap of non-timestamped headers - gap := datagen.RandomInt(r, 10) + 1 - - // simulate a random number of blocks - // where the new batch of headers has a gap with the previous batch - SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, numHeaders+gap+1, numHeaders) - - // get a random height that is in this gap - randomHeightInGap := datagen.RandomInt(r, int(gap+1)) + numHeaders - // find the closest header with the given randomHeightInGap - header, err = zcKeeper.FindClosestHeader(ctx, czConsumerId, randomHeightInGap) - require.NoError(t, err) - // the header should be the same as the last header in the last batch - require.Equal(t, headers[len(headers)-1].Header.AppHash, header.Hash) - }) -} diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go deleted file mode 100644 index 29deeea9..00000000 --- a/x/zoneconcierge/keeper/chain_info_indexer.go +++ /dev/null @@ -1,142 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - "github.com/cosmos/cosmos-sdk/runtime" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/store/prefix" - 
"github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -func (k Keeper) setChainInfo(ctx context.Context, chainInfo *types.ChainInfo) { - store := k.chainInfoStore(ctx) - store.Set([]byte(chainInfo.ConsumerId), k.cdc.MustMarshal(chainInfo)) -} - -func (k Keeper) InitChainInfo(ctx context.Context, consumerID string) (*types.ChainInfo, error) { - if len(consumerID) == 0 { - return nil, fmt.Errorf("consumerID is empty") - } - // ensure chain info has not been initialised yet - if k.HasChainInfo(ctx, consumerID) { - return nil, errorsmod.Wrapf(types.ErrInvalidChainInfo, "chain info has already initialized") - } - - chainInfo := &types.ChainInfo{ - ConsumerId: consumerID, - LatestHeader: nil, - LatestForks: &types.Forks{ - Headers: []*types.IndexedHeader{}, - }, - TimestampedHeadersCount: 0, - } - - k.setChainInfo(ctx, chainInfo) - return chainInfo, nil -} - -// HasChainInfo returns whether the chain info exists for a given ID -// Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection, -// we can only check its existence every time, and return an empty one if it's not initialised yet. -func (k Keeper) HasChainInfo(ctx context.Context, consumerId string) bool { - store := k.chainInfoStore(ctx) - return store.Has([]byte(consumerId)) -} - -// GetChainInfo returns the ChainInfo struct for a chain with a given ID -// Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection, -// we can only check its existence every time, and return an empty one if it's not initialised yet. -func (k Keeper) GetChainInfo(ctx context.Context, consumerId string) (*types.ChainInfo, error) { - if !k.HasChainInfo(ctx, consumerId) { - return nil, types.ErrChainInfoNotFound - } - - store := k.chainInfoStore(ctx) - chainInfoBytes := store.Get([]byte(consumerId)) - var chainInfo types.ChainInfo - k.cdc.MustUnmarshal(chainInfoBytes, &chainInfo) - return &chainInfo, nil -} - -// updateLatestHeader updates the chainInfo w.r.t. the given header, including -// - replace the old latest header with the given one -// - increment the number of timestamped headers -// Note that this function is triggered only upon receiving headers from the relayer, -// and only a subset of headers in CZ are relayed. Thus TimestampedHeadersCount is not -// equal to the total number of headers in CZ. -func (k Keeper) updateLatestHeader(ctx context.Context, consumerId string, header *types.IndexedHeader) error { - if header == nil { - return errorsmod.Wrapf(types.ErrInvalidHeader, "header is nil") - } - chainInfo, err := k.GetChainInfo(ctx, consumerId) - if err != nil { - // chain info has not been initialised yet - return fmt.Errorf("failed to get chain info of %s: %w", consumerId, err) - } - chainInfo.LatestHeader = header // replace the old latest header with the given one - chainInfo.TimestampedHeadersCount++ // increment the number of timestamped headers - - k.setChainInfo(ctx, chainInfo) - return nil -} - -// tryToUpdateLatestForkHeader tries to update the chainInfo w.r.t. 
the given fork header -// - If no fork exists, add this fork header as the latest one -// - If there is a fork header at the same height, add this fork to the set of latest fork headers -// - If this fork header is newer than the previous one, replace the old fork headers with this fork header -// - If this fork header is older than the current latest fork, ignore -func (k Keeper) tryToUpdateLatestForkHeader(ctx context.Context, consumerId string, header *types.IndexedHeader) error { - if header == nil { - return errorsmod.Wrapf(types.ErrInvalidHeader, "header is nil") - } - - chainInfo, err := k.GetChainInfo(ctx, consumerId) - if err != nil { - return errorsmod.Wrapf(types.ErrChainInfoNotFound, "cannot insert fork header when chain info is not initialized") - } - - if len(chainInfo.LatestForks.Headers) == 0 { - // no fork at the moment, add this fork header as the latest one - chainInfo.LatestForks.Headers = append(chainInfo.LatestForks.Headers, header) - } else if chainInfo.LatestForks.Headers[0].Height == header.Height { - // there exists fork headers at the same height, add this fork header to the set of latest fork headers - chainInfo.LatestForks.Headers = append(chainInfo.LatestForks.Headers, header) - } else if chainInfo.LatestForks.Headers[0].Height < header.Height { - // this fork header is newer than the previous one, replace the old fork headers with this fork header - chainInfo.LatestForks = &types.Forks{ - Headers: []*types.IndexedHeader{header}, - } - } else { - // this fork header is older than the current latest fork, don't record this fork header in chain info - return nil - } - - k.setChainInfo(ctx, chainInfo) - return nil -} - -// GetAllConsumerIDs gets IDs of all consumer that integrate Babylon -func (k Keeper) GetAllConsumerIDs(ctx context.Context) []string { - consumerIds := []string{} - iter := k.chainInfoStore(ctx).Iterator(nil, nil) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - consumerIdBytes := iter.Key() - consumerId := string(consumerIdBytes) - consumerIds = append(consumerIds, consumerId) - } - return consumerIds -} - -// msgChainInfoStore stores the information of canonical chains and forks for CZs -// prefix: ChainInfoKey -// key: consumerId -// value: ChainInfo -func (k Keeper) chainInfoStore(ctx context.Context) prefix.Store { - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - return prefix.NewStore(storeAdapter, types.ChainInfoKey) -} diff --git a/x/zoneconcierge/keeper/chain_info_indexer_test.go b/x/zoneconcierge/keeper/chain_info_indexer_test.go deleted file mode 100644 index 8bbb1fc1..00000000 --- a/x/zoneconcierge/keeper/chain_info_indexer_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package keeper_test - -// NOTE: the chain info indexer is tested in `canonical_chain_indexer_test.go` and `fork_indexer_test.go` altogether given the similar paradigm. 
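
The four-way rule in `tryToUpdateLatestForkHeader` above can be seen at a glance in the following standalone sketch; the types are simplified stand-ins for illustration only, since the real keeper code is being removed in this change.

```go
package main

import "fmt"

// forkHeader is a simplified stand-in for IndexedHeader; only the height matters here.
type forkHeader struct {
	Height uint64
	Hash   string
}

// updateLatestForks mirrors the rule set of tryToUpdateLatestForkHeader:
// empty set -> start a new set; same height -> append; higher height -> replace; lower -> ignore.
func updateLatestForks(latest []forkHeader, h forkHeader) []forkHeader {
	switch {
	case len(latest) == 0:
		return []forkHeader{h}
	case latest[0].Height == h.Height:
		return append(latest, h)
	case latest[0].Height < h.Height:
		return []forkHeader{h}
	default: // older than the current latest fork: keep the set unchanged
		return latest
	}
}

func main() {
	var forks []forkHeader
	forks = updateLatestForks(forks, forkHeader{Height: 5, Hash: "a"}) // start a new set
	forks = updateLatestForks(forks, forkHeader{Height: 5, Hash: "b"}) // same height: append
	forks = updateLatestForks(forks, forkHeader{Height: 7, Hash: "c"}) // newer: replace
	forks = updateLatestForks(forks, forkHeader{Height: 6, Hash: "d"}) // older: ignored
	fmt.Println(forks)                                                 // [{7 c}]
}
```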
diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go deleted file mode 100644 index 942f9000..00000000 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go +++ /dev/null @@ -1,139 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - "cosmossdk.io/store/prefix" - "github.com/cosmos/cosmos-sdk/runtime" - sdk "github.com/cosmos/cosmos-sdk/types" - - bbn "github.com/babylonlabs-io/babylon/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// GetEpochChainInfo gets the latest chain info of a given epoch for a given chain ID -func (k Keeper) GetEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64) (*types.ChainInfoWithProof, error) { - if !k.EpochChainInfoExists(ctx, consumerID, epochNumber) { - return nil, types.ErrEpochChainInfoNotFound - } - - store := k.epochChainInfoStore(ctx, consumerID) - epochNumberBytes := sdk.Uint64ToBigEndian(epochNumber) - epochChainInfoBytes := store.Get(epochNumberBytes) - var chainInfo types.ChainInfoWithProof - k.cdc.MustUnmarshal(epochChainInfoBytes, &chainInfo) - return &chainInfo, nil -} - -func (k Keeper) setEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64, chainInfo *types.ChainInfoWithProof) { - store := k.epochChainInfoStore(ctx, consumerID) - store.Set(sdk.Uint64ToBigEndian(epochNumber), k.cdc.MustMarshal(chainInfo)) -} - -// EpochChainInfoExists checks if the latest chain info exists of a given epoch for a given chain ID -func (k Keeper) EpochChainInfoExists(ctx context.Context, consumerID string, epochNumber uint64) bool { - store := k.epochChainInfoStore(ctx, consumerID) - epochNumberBytes := sdk.Uint64ToBigEndian(epochNumber) - return store.Has(epochNumberBytes) -} - -// GetEpochHeaders gets the headers timestamped in a given epoch, in the ascending order -func (k Keeper) GetEpochHeaders(ctx context.Context, consumerID string, epochNumber uint64) ([]*types.IndexedHeader, error) { - headers := []*types.IndexedHeader{} - - // find the last timestamped header of this chain in the epoch - epochChainInfoWithProof, err := k.GetEpochChainInfo(ctx, consumerID, epochNumber) - if err != nil { - return nil, err - } - epochChainInfo := epochChainInfoWithProof.ChainInfo - // it's possible that this epoch's snapshot is not updated for many epochs - // this implies that this epoch does not timestamp any header for this chain at all - if epochChainInfo.LatestHeader.BabylonEpoch < epochNumber { - return nil, types.ErrEpochHeadersNotFound - } - // now we have the last header in this epoch - headers = append(headers, epochChainInfo.LatestHeader) - - // append all previous headers until reaching the previous epoch - canonicalChainStore := k.canonicalChainStore(ctx, consumerID) - lastHeaderKey := sdk.Uint64ToBigEndian(epochChainInfo.LatestHeader.Height) - // NOTE: even in ReverseIterator, start and end should still be specified in ascending order - canonicalChainIter := canonicalChainStore.ReverseIterator(nil, lastHeaderKey) - defer canonicalChainIter.Close() - for ; canonicalChainIter.Valid(); canonicalChainIter.Next() { - var prevHeader types.IndexedHeader - k.cdc.MustUnmarshal(canonicalChainIter.Value(), &prevHeader) - if prevHeader.BabylonEpoch < epochNumber { - // we have reached the previous epoch, break the loop - break - } - headers = append(headers, &prevHeader) - } - - // reverse the list so that it remains ascending order - bbn.Reverse(headers) - - return headers, nil -} - -// recordEpochChainInfo records the chain info 
for a given epoch number of given chain ID -// where the latest chain info is retrieved from the chain info indexer -func (k Keeper) recordEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64) { - // get the latest known chain info - chainInfo, err := k.GetChainInfo(ctx, consumerID) - if err != nil { - k.Logger(sdk.UnwrapSDKContext(ctx)).Debug("chain info does not exist yet, nothing to record") - return - } - chainInfoWithProof := &types.ChainInfoWithProof{ - ChainInfo: chainInfo, - ProofHeaderInEpoch: nil, - } - - // NOTE: we can record epoch chain info without ancestor since IBC connection can be established at any height - k.setEpochChainInfo(ctx, consumerID, epochNumber, chainInfoWithProof) -} - -// recordEpochChainInfo records the chain info for a given epoch number of given chain ID -// where the latest chain info is retrieved from the chain info indexer -func (k Keeper) recordEpochChainInfoProofs(ctx context.Context, epochNumber uint64) { - curEpoch := k.GetEpoch(ctx) - consumerIDs := k.GetAllConsumerIDs(ctx) - - // save all inclusion proofs - for _, consumerID := range consumerIDs { - // retrieve chain info with empty proof - chainInfo, err := k.GetEpochChainInfo(ctx, consumerID, epochNumber) - if err != nil { - panic(err) // only programming error - } - - lastHeaderInEpoch := chainInfo.ChainInfo.LatestHeader - if lastHeaderInEpoch.BabylonEpoch == curEpoch.EpochNumber { - // get proofCZHeaderInEpoch - proofCZHeaderInEpoch, err := k.ProveCZHeaderInEpoch(ctx, lastHeaderInEpoch, curEpoch) - if err != nil { - // only programming error is possible here - panic(fmt.Errorf("failed to generate proofCZHeaderInEpoch for consumer %s: %w", consumerID, err)) - } - - chainInfo.ProofHeaderInEpoch = proofCZHeaderInEpoch - - // set chain info with proof back - k.setEpochChainInfo(ctx, consumerID, epochNumber, chainInfo) - } - } -} - -// epochChainInfoStore stores each epoch's latest ChainInfo for a CZ -// prefix: EpochChainInfoKey || consumerID -// key: epochNumber -// value: ChainInfoWithProof -func (k Keeper) epochChainInfoStore(ctx context.Context, consumerID string) prefix.Store { - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - epochChainInfoStore := prefix.NewStore(storeAdapter, types.EpochChainInfoKey) - consumerIDBytes := []byte(consumerID) - return prefix.NewStore(epochChainInfoStore, consumerIDBytes) -} diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go deleted file mode 100644 index 472c76dd..00000000 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package keeper_test - -import ( - "math/rand" - "testing" - - ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" - "github.com/stretchr/testify/require" - - "github.com/babylonlabs-io/babylon/app" - "github.com/babylonlabs-io/babylon/testutil/datagen" -) - -func FuzzEpochChainInfoIndexer(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - hooks := zcKeeper.Hooks() - - // enter a random epoch - epochNum := datagen.RandomInt(r, 10) - for j := uint64(0); j < epochNum; j++ { - babylonApp.EpochingKeeper.IncEpoch(ctx) - } - - // invoke the hook a random number of times to simulate a random number of blocks - 
numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - // end this epoch - hooks.AfterEpochEnds(ctx, epochNum) - - // check if the chain info of this epoch is recorded or not - chainInfoWithProof, err := zcKeeper.GetEpochChainInfo(ctx, consumerID, epochNum) - chainInfo := chainInfoWithProof.ChainInfo - require.NoError(t, err) - require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height) - require.Equal(t, numHeaders, chainInfo.TimestampedHeadersCount) - require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) - }) -} - -func FuzzGetEpochHeaders(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - hooks := zcKeeper.Hooks() - - numReqs := datagen.RandomInt(r, 5) + 1 - epochNumList := []uint64{datagen.RandomInt(r, 10) + 1} - nextHeightList := []uint64{0} - numHeadersList := []uint64{} - expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} - numForkHeadersList := []uint64{} - - // we test the scenario of ending an epoch for multiple times, in order to ensure that - // consecutive epoch infos do not affect each other. - for i := uint64(0); i < numReqs; i++ { - epochNum := epochNumList[i] - // enter a random epoch - if i == 0 { - for j := uint64(1); j < epochNum; j++ { // starting from epoch 1 - babylonApp.EpochingKeeper.IncEpoch(ctx) - } - } else { - for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { - babylonApp.EpochingKeeper.IncEpoch(ctx) - } - } - - // generate a random number of headers and fork headers - numHeadersList = append(numHeadersList, datagen.RandomInt(r, 100)+1) - numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(r, 10)+1) - // trigger hooks to append these headers and fork headers - expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) - expectedHeadersMap[epochNum] = expectedHeaders - // prepare nextHeight for the next request - nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) - - // simulate the scenario that a random epoch has ended - hooks.AfterEpochEnds(ctx, epochNum) - // prepare epochNum for the next request - epochNumList = append(epochNumList, epochNum+datagen.RandomInt(r, 10)+1) - } - - // attest the correctness of epoch info for each tested epoch - for i := uint64(0); i < numReqs; i++ { - epochNum := epochNumList[i] - // check if the headers are same as expected - headers, err := zcKeeper.GetEpochHeaders(ctx, consumerID, epochNum) - require.NoError(t, err) - require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) - for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { - require.Equal(t, expectedHeadersMap[epochNum][j].Header.AppHash, headers[j].Hash) - } - } - }) -} diff --git a/x/zoneconcierge/keeper/epochs.go b/x/zoneconcierge/keeper/epochs.go deleted file mode 100644 index 6c3bfac8..00000000 --- a/x/zoneconcierge/keeper/epochs.go +++ /dev/null @@ -1,79 +0,0 @@ -package keeper - -import ( - "context" - - "cosmossdk.io/store/prefix" - epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/cosmos/cosmos-sdk/runtime" - sdk 
"github.com/cosmos/cosmos-sdk/types" -) - -// GetLastSentSegment get last broadcasted btc light client segment -func (k Keeper) GetLastSentSegment(ctx context.Context) *types.BTCChainSegment { - store := k.storeService.OpenKVStore(ctx) - has, err := store.Has(types.LastSentBTCSegmentKey) - if err != nil { - panic(err) - } - if !has { - return nil - } - segmentBytes, err := store.Get(types.LastSentBTCSegmentKey) - if err != nil { - panic(err) - } - var segment types.BTCChainSegment - k.cdc.MustUnmarshal(segmentBytes, &segment) - return &segment -} - -// setLastSentSegment sets the last segment which was broadcasted to the other light clients -// called upon each AfterRawCheckpointFinalized hook invocation -func (k Keeper) setLastSentSegment(ctx context.Context, segment *types.BTCChainSegment) { - store := k.storeService.OpenKVStore(ctx) - segmentBytes := k.cdc.MustMarshal(segment) - if err := store.Set(types.LastSentBTCSegmentKey, segmentBytes); err != nil { - panic(err) - } -} - -func (k Keeper) GetLastFinalizedEpoch(ctx context.Context) uint64 { - return k.checkpointingKeeper.GetLastFinalizedEpoch(ctx) -} - -func (k Keeper) GetEpoch(ctx context.Context) *epochingtypes.Epoch { - return k.epochingKeeper.GetEpoch(ctx) -} - -func (k Keeper) recordSealedEpochProof(ctx context.Context, epochNum uint64) { - // proof that the epoch is sealed - proofEpochSealed, err := k.ProveEpochSealed(ctx, epochNum) - if err != nil { - panic(err) // only programming error - } - - store := k.sealedEpochProofStore(ctx) - store.Set(sdk.Uint64ToBigEndian(epochNum), k.cdc.MustMarshal(proofEpochSealed)) -} - -func (k Keeper) getSealedEpochProof(ctx context.Context, epochNum uint64) *types.ProofEpochSealed { - store := k.sealedEpochProofStore(ctx) - proofBytes := store.Get(sdk.Uint64ToBigEndian(epochNum)) - if len(proofBytes) == 0 { - return nil - } - var proof types.ProofEpochSealed - k.cdc.MustUnmarshal(proofBytes, &proof) - return &proof -} - -// sealedEpochProofStore stores the proof that each epoch is sealed -// prefix: SealedEpochProofKey -// key: epochNumber -// value: ChainInfoWithProof -func (k Keeper) sealedEpochProofStore(ctx context.Context) prefix.Store { - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - return prefix.NewStore(storeAdapter, types.SealedEpochProofKey) -} diff --git a/x/zoneconcierge/keeper/fork_indexer.go b/x/zoneconcierge/keeper/fork_indexer.go deleted file mode 100644 index 936c13ea..00000000 --- a/x/zoneconcierge/keeper/fork_indexer.go +++ /dev/null @@ -1,59 +0,0 @@ -package keeper - -import ( - "bytes" - "context" - - "github.com/cosmos/cosmos-sdk/runtime" - - sdkerrors "cosmossdk.io/errors" - "cosmossdk.io/store/prefix" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// GetForks returns a list of forked headers at a given height -func (k Keeper) GetForks(ctx context.Context, consumerID string, height uint64) *types.Forks { - store := k.forkStore(ctx, consumerID) - heightBytes := sdk.Uint64ToBigEndian(height) - // if no fork at the moment, create an empty struct - if !store.Has(heightBytes) { - return &types.Forks{ - Headers: []*types.IndexedHeader{}, - } - } - forksBytes := store.Get(heightBytes) - var forks types.Forks - k.cdc.MustUnmarshal(forksBytes, &forks) - return &forks -} - -// insertForkHeader inserts a forked header to the list of forked headers at the same height -func (k Keeper) insertForkHeader(ctx context.Context, consumerID string, header *types.IndexedHeader) error { - if header == 
nil { - return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") - } - store := k.forkStore(ctx, consumerID) - forks := k.GetForks(ctx, consumerID, header.Height) // if no fork at the height, forks will be an empty struct rather than nil - // if the header is already in forks, discard this header and return directly - for _, h := range forks.Headers { - if bytes.Equal(h.Hash, header.Hash) { - return nil - } - } - forks.Headers = append(forks.Headers, header) - forksBytes := k.cdc.MustMarshal(forks) - store.Set(sdk.Uint64ToBigEndian(header.Height), forksBytes) - return nil -} - -// forkStore stores the forks for each CZ -// prefix: ForkKey || consumerID -// key: height that this fork starts from -// value: a list of IndexedHeader, representing each header in the fork -func (k Keeper) forkStore(ctx context.Context, consumerID string) prefix.Store { - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - forkStore := prefix.NewStore(storeAdapter, types.ForkKey) - consumerIDBytes := []byte(consumerID) - return prefix.NewStore(forkStore, consumerIDBytes) -} diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go deleted file mode 100644 index e1fb2a95..00000000 --- a/x/zoneconcierge/keeper/fork_indexer_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package keeper_test - -import ( - "math/rand" - "testing" - - "github.com/babylonlabs-io/babylon/app" - "github.com/babylonlabs-io/babylon/testutil/datagen" - "github.com/stretchr/testify/require" -) - -func FuzzForkIndexer(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - // invoke the hook a random number of times to simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - _, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - // check if the fork is updated or not - forks := zcKeeper.GetForks(ctx, consumerID, numHeaders-1) - require.Equal(t, numForkHeaders, uint64(len(forks.Headers))) - for i := range forks.Headers { - require.Equal(t, numHeaders-1, forks.Headers[i].Height) - require.Equal(t, forkHeaders[i].Header.AppHash, forks.Headers[i].Hash) - } - - // check if the chain info is updated or not - chainInfo, err := zcKeeper.GetChainInfo(ctx, consumerID) - require.NoError(t, err) - require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) - for i := range forks.Headers { - require.Equal(t, consumerID, chainInfo.LatestForks.Headers[i].ConsumerId) - require.Equal(t, numHeaders-1, chainInfo.LatestForks.Headers[i].Height) - require.Equal(t, forkHeaders[i].Header.AppHash, chainInfo.LatestForks.Headers[i].Hash) - } - }) -} diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go deleted file mode 100644 index 5b0e7ee6..00000000 --- a/x/zoneconcierge/keeper/grpc_query.go +++ /dev/null @@ -1,403 +0,0 @@ -package keeper - -import ( - "context" - - bbntypes "github.com/babylonlabs-io/babylon/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/query" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var _ types.QueryServer = Keeper{} - -const 
maxQueryChainsInfoLimit = 100 - -func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - ctx := sdk.UnwrapSDKContext(c) - - return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil -} - -func (k Keeper) ChainList(c context.Context, req *types.QueryChainListRequest) (*types.QueryChainListResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - ctx := sdk.UnwrapSDKContext(c) - - ConsumerIds := []string{} - store := k.chainInfoStore(ctx) - pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { - ConsumerId := string(key) - ConsumerIds = append(ConsumerIds, ConsumerId) - return nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &types.QueryChainListResponse{ - ConsumerIds: ConsumerIds, - Pagination: pageRes, - } - return resp, nil -} - -// ChainsInfo returns the latest info for a given list of chains -func (k Keeper) ChainsInfo(c context.Context, req *types.QueryChainsInfoRequest) (*types.QueryChainsInfoResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - // return if no chain IDs are provided - if len(req.ConsumerIds) == 0 { - return nil, status.Error(codes.InvalidArgument, "consumer IDs cannot be empty") - } - - // return if chain IDs exceed the limit - if len(req.ConsumerIds) > maxQueryChainsInfoLimit { - return nil, status.Errorf(codes.InvalidArgument, "cannot query more than %d chains", maxQueryChainsInfoLimit) - } - - // return if chain IDs contain duplicates or empty strings - if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil { - return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error()) - } - - ctx := sdk.UnwrapSDKContext(c) - var chainsInfo []*types.ChainInfo - for _, ConsumerId := range req.ConsumerIds { - chainInfo, err := k.GetChainInfo(ctx, ConsumerId) - if err != nil { - return nil, err - } - - chainsInfo = append(chainsInfo, chainInfo) - } - - resp := &types.QueryChainsInfoResponse{ChainsInfo: chainsInfo} - return resp, nil -} - -// Header returns the header and fork headers at a given height -func (k Keeper) Header(c context.Context, req *types.QueryHeaderRequest) (*types.QueryHeaderResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - if len(req.ConsumerId) == 0 { - return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") - } - - ctx := sdk.UnwrapSDKContext(c) - - header, err := k.GetHeader(ctx, req.ConsumerId, req.Height) - if err != nil { - return nil, err - } - forks := k.GetForks(ctx, req.ConsumerId, req.Height) - resp := &types.QueryHeaderResponse{ - Header: header, - ForkHeaders: forks, - } - - return resp, nil -} - -// EpochChainsInfo returns the latest info for list of chains in a given epoch -func (k Keeper) EpochChainsInfo(c context.Context, req *types.QueryEpochChainsInfoRequest) (*types.QueryEpochChainsInfoResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - // return if no chain IDs are provided - if len(req.ConsumerIds) == 0 { - return nil, status.Error(codes.InvalidArgument, "consumer IDs cannot be empty") - } - - // return if chain IDs exceed the limit - if len(req.ConsumerIds) > maxQueryChainsInfoLimit 
{ - return nil, status.Errorf(codes.InvalidArgument, "cannot query more than %d chains", maxQueryChainsInfoLimit) - } - - // return if chain IDs contain duplicates or empty strings - if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil { - return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error()) - } - - ctx := sdk.UnwrapSDKContext(c) - var chainsInfo []*types.ChainInfo - for _, ConsumerId := range req.ConsumerIds { - // check if chain ID is valid - if !k.HasChainInfo(ctx, ConsumerId) { - return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", ConsumerId).Error()) - } - - // if the chain info is not found in the given epoch, return with empty fields - if !k.EpochChainInfoExists(ctx, ConsumerId, req.EpochNum) { - chainsInfo = append(chainsInfo, &types.ChainInfo{ConsumerId: ConsumerId}) - continue - } - - // find the chain info of the given epoch - chainInfoWithProof, err := k.GetEpochChainInfo(ctx, ConsumerId, req.EpochNum) - if err != nil { - return nil, err - } - - chainsInfo = append(chainsInfo, chainInfoWithProof.ChainInfo) - } - - resp := &types.QueryEpochChainsInfoResponse{ChainsInfo: chainsInfo} - return resp, nil -} - -// ListHeaders returns all headers of a chain with given ID, with pagination support -func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersRequest) (*types.QueryListHeadersResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - if len(req.ConsumerId) == 0 { - return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") - } - - ctx := sdk.UnwrapSDKContext(c) - - headers := []*types.IndexedHeader{} - store := k.canonicalChainStore(ctx, req.ConsumerId) - pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { - var header types.IndexedHeader - k.cdc.MustUnmarshal(value, &header) - headers = append(headers, &header) - return nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &types.QueryListHeadersResponse{ - Headers: headers, - Pagination: pageRes, - } - return resp, nil -} - -// ListEpochHeaders returns all headers of a chain with given ID -// TODO: support pagination in this RPC -func (k Keeper) ListEpochHeaders(c context.Context, req *types.QueryListEpochHeadersRequest) (*types.QueryListEpochHeadersResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - if len(req.ConsumerId) == 0 { - return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") - } - - ctx := sdk.UnwrapSDKContext(c) - - headers, err := k.GetEpochHeaders(ctx, req.ConsumerId, req.EpochNum) - if err != nil { - return nil, err - } - - resp := &types.QueryListEpochHeadersResponse{ - Headers: headers, - } - return resp, nil -} - -// FinalizedChainsInfo returns the finalized info for a given list of chains -func (k Keeper) FinalizedChainsInfo(c context.Context, req *types.QueryFinalizedChainsInfoRequest) (*types.QueryFinalizedChainsInfoResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - // return if no chain IDs are provided - if len(req.ConsumerIds) == 0 { - return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") - } - - // return if chain IDs exceed the limit - if len(req.ConsumerIds) > maxQueryChainsInfoLimit { - return nil, status.Errorf(codes.InvalidArgument, "cannot 
query more than %d chains", maxQueryChainsInfoLimit) - } - - // return if chain IDs contain duplicates or empty strings - if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil { - return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error()) - } - - ctx := sdk.UnwrapSDKContext(c) - resp := &types.QueryFinalizedChainsInfoResponse{FinalizedChainsInfo: []*types.FinalizedChainInfo{}} - - // find the last finalised epoch - lastFinalizedEpoch := k.GetLastFinalizedEpoch(ctx) - for _, ConsumerId := range req.ConsumerIds { - // check if chain ID is valid - if !k.HasChainInfo(ctx, ConsumerId) { - return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", ConsumerId).Error()) - } - - data := &types.FinalizedChainInfo{ConsumerId: ConsumerId} - - // if the chain info is not found in the last finalised epoch, return with empty fields - if !k.EpochChainInfoExists(ctx, ConsumerId, lastFinalizedEpoch) { - resp.FinalizedChainsInfo = append(resp.FinalizedChainsInfo, data) - continue - } - - // find the chain info in the last finalised epoch - chainInfoWithProof, err := k.GetEpochChainInfo(ctx, ConsumerId, lastFinalizedEpoch) - if err != nil { - return nil, err - } - chainInfo := chainInfoWithProof.ChainInfo - - // set finalizedEpoch as the earliest epoch that snapshots this chain info. - // it's possible that the chain info's epoch is way before the last finalised epoch - // e.g., when there is no relayer for many epochs - // NOTE: if an epoch is finalised then all of its previous epochs are also finalised - finalizedEpoch := lastFinalizedEpoch - if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { - finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch - } - - data.FinalizedChainInfo = chainInfo - - // find the epoch metadata of the finalised epoch - data.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - - rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - - data.RawCheckpoint = rawCheckpoint.Ckpt - - // find the raw checkpoint and the best submission key for the finalised epoch - _, data.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - - // generate all proofs - if req.Prove { - data.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, data.EpochInfo, data.BtcSubmissionKey) - if err != nil { - return nil, err - } - } - - resp.FinalizedChainsInfo = append(resp.FinalizedChainsInfo, data) - } - - return resp, nil -} - -func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.QueryFinalizedChainInfoUntilHeightRequest) (*types.QueryFinalizedChainInfoUntilHeightResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "invalid request") - } - - if len(req.ConsumerId) == 0 { - return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") - } - - ctx := sdk.UnwrapSDKContext(c) - resp := &types.QueryFinalizedChainInfoUntilHeightResponse{} - - // find the last finalised epoch - lastFinalizedEpoch := k.GetLastFinalizedEpoch(ctx) - // find the chain info in the last finalised epoch - chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ConsumerId, lastFinalizedEpoch) - if err != nil { - return nil, err - } - chainInfo := chainInfoWithProof.ChainInfo - - // set finalizedEpoch as the earliest epoch that snapshots this chain info. 
- // it's possible that the chain info's epoch is way before the last finalised epoch - // e.g., when there is no relayer for many epochs - // NOTE: if an epoch is finalised then all of its previous epochs are also finalised - finalizedEpoch := lastFinalizedEpoch - if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { - finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch - } - - resp.FinalizedChainInfo = chainInfo - - if chainInfo.LatestHeader.Height <= req.Height { // the requested height is after the last finalised chain info - // find and assign the epoch metadata of the finalised epoch - resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - - rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) - - if err != nil { - return nil, err - } - - resp.RawCheckpoint = rawCheckpoint.Ckpt - - // find and assign the raw checkpoint and the best submission key for the finalised epoch - _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - } else { // the requested height is before the last finalised chain info - // starting from the requested height, iterate backward until a timestamped header - closestHeader, err := k.FindClosestHeader(ctx, req.ConsumerId, req.Height) - if err != nil { - return nil, err - } - // assign the finalizedEpoch, and retrieve epoch info, raw ckpt and submission key - finalizedEpoch = closestHeader.BabylonEpoch - chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ConsumerId, finalizedEpoch) - if err != nil { - return nil, err - } - resp.FinalizedChainInfo = chainInfoWithProof.ChainInfo - resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - - rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) - - if err != nil { - return nil, err - } - - resp.RawCheckpoint = rawCheckpoint.Ckpt - - _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) - if err != nil { - return nil, err - } - } - - // if the query does not want the proofs, return here - if !req.Prove { - return resp, nil - } - - // generate all proofs - resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.BtcSubmissionKey) - if err != nil { - return nil, err - } - - return resp, nil -} diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go deleted file mode 100644 index b160119a..00000000 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package keeper_test - -import ( - "math/rand" - "testing" - - "github.com/babylonlabs-io/babylon/app" - btclightclienttypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" - "github.com/cosmos/cosmos-sdk/types/query" - ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/babylonlabs-io/babylon/testutil/datagen" - testkeeper "github.com/babylonlabs-io/babylon/testutil/keeper" - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -type chainInfo struct { - consumerID string - numHeaders uint64 - numForkHeaders uint64 - headerStartHeight uint64 -} - -func FuzzChainList(f *testing.F) { - 
datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - - // invoke the hook a random number of times with random chain IDs - numHeaders := datagen.RandomInt(r, 100) + 1 - allConsumerIDs := []string{} - for i := uint64(0); i < numHeaders; i++ { - var consumerID string - // simulate the scenario that some headers belong to the same chain - if i > 0 && datagen.OneInN(r, 2) { - consumerID = allConsumerIDs[r.Intn(len(allConsumerIDs))] - } else { - consumerID = datagen.GenRandomHexStr(r, 30) - allConsumerIDs = append(allConsumerIDs, consumerID) - } - header := datagen.GenRandomIBCTMHeader(r, 0) - zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false) - } - - limit := datagen.RandomInt(r, len(allConsumerIDs)) + 1 - - // make query to get actual chain IDs - resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{ - Pagination: &query.PageRequest{ - Limit: limit, - }, - }) - require.NoError(t, err) - actualConsumerIDs := resp.ConsumerIds - - require.Equal(t, limit, uint64(len(actualConsumerIDs))) - allConsumerIDs = zcKeeper.GetAllConsumerIDs(ctx) - for i := uint64(0); i < limit; i++ { - require.Equal(t, allConsumerIDs[i], actualConsumerIDs[i]) - } - }) -} - -func FuzzChainsInfo(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - - var ( - chainsInfo []chainInfo - consumerIDs []string - ) - numChains := datagen.RandomInt(r, 100) + 1 - for i := uint64(0); i < numChains; i++ { - consumerID := datagen.GenRandomHexStr(r, 30) - numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - consumerIDs = append(consumerIDs, consumerID) - chainsInfo = append(chainsInfo, chainInfo{ - consumerID: consumerID, - numHeaders: numHeaders, - numForkHeaders: numForkHeaders, - }) - } - - resp, err := zcKeeper.ChainsInfo(ctx, &zctypes.QueryChainsInfoRequest{ - ConsumerIds: consumerIDs, - }) - require.NoError(t, err) - - for i, respData := range resp.ChainsInfo { - require.Equal(t, chainsInfo[i].consumerID, respData.ConsumerId) - require.Equal(t, chainsInfo[i].numHeaders-1, respData.LatestHeader.Height) - require.Equal(t, chainsInfo[i].numForkHeaders, uint64(len(respData.LatestForks.Headers))) - } - }) -} - -func FuzzHeader(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - // invoke the hook a random number of times to simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 2 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - headers, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - // find header at a random height and assert correctness against the expected header - randomHeight := datagen.RandomInt(r, int(numHeaders-1)) - resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: 
consumerID, Height: randomHeight}) - require.NoError(t, err) - require.Equal(t, headers[randomHeight].Header.AppHash, resp.Header.Hash) - require.Len(t, resp.ForkHeaders.Headers, 0) - - // find the last header and fork headers then assert correctness - resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: consumerID, Height: numHeaders - 1}) - require.NoError(t, err) - require.Equal(t, headers[numHeaders-1].Header.AppHash, resp.Header.Hash) - require.Len(t, resp.ForkHeaders.Headers, int(numForkHeaders)) - for i := 0; i < int(numForkHeaders); i++ { - require.Equal(t, forkHeaders[i].Header.AppHash, resp.ForkHeaders.Headers[i].Hash) - } - }) -} - -func FuzzEpochChainsInfo(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - - hooks := zcKeeper.Hooks() - - // generate a random number of chains - numChains := datagen.RandomInt(r, 10) + 1 - var consumerIDs []string - for j := uint64(0); j < numChains; j++ { - consumerID := datagen.GenRandomHexStr(r, 30) - consumerIDs = append(consumerIDs, consumerID) - } - - // generate a random number of epochNums - totalNumEpochs := datagen.RandomInt(r, 5) + 1 - epochNums := []uint64{datagen.RandomInt(r, 10) + 1} - for i := uint64(1); i < totalNumEpochs; i++ { - nextEpoch := epochNums[i-1] + datagen.RandomInt(r, 10) + 1 - epochNums = append(epochNums, nextEpoch) - } - - // we insert random number of headers and fork headers for each chain in each epoch, - // chainHeaderStartHeights keeps track of the next start height of header for each chain - chainHeaderStartHeights := make([]uint64, numChains) - epochToChainInfo := make(map[uint64]map[string]chainInfo) - for _, epochNum := range epochNums { - epochToChainInfo[epochNum] = make(map[string]chainInfo) - for j, consumerID := range consumerIDs { - // generate a random number of headers and fork headers for each chain - numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - - // trigger hooks to append these headers and fork headers - SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, chainHeaderStartHeights[j], numHeaders, numForkHeaders) - - epochToChainInfo[epochNum][consumerID] = chainInfo{ - consumerID: consumerID, - numHeaders: numHeaders, - numForkHeaders: numForkHeaders, - headerStartHeight: chainHeaderStartHeights[j], - } - - // update next insertion height for this chain - chainHeaderStartHeights[j] += numHeaders - } - - // simulate the scenario that a random epoch has ended - hooks.AfterEpochEnds(ctx, epochNum) - } - - // assert correctness of best case scenario - for _, epochNum := range epochNums { - resp, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: epochNum, ConsumerIds: consumerIDs}) - require.NoError(t, err) - epochChainsInfo := resp.ChainsInfo - require.Len(t, epochChainsInfo, int(numChains)) - for _, info := range epochChainsInfo { - require.Equal(t, epochToChainInfo[epochNum][info.ConsumerId].numForkHeaders, uint64(len(info.LatestForks.Headers))) - - actualHeight := epochToChainInfo[epochNum][info.ConsumerId].headerStartHeight + (epochToChainInfo[epochNum][info.ConsumerId].numHeaders - 1) - require.Equal(t, actualHeight, info.LatestHeader.Height) - - } - } - - // if num of chain ids exceed the max limit, query should fail - largeNumChains := datagen.RandomInt(r, 10) + 101 - var 
maxConsumerIDs []string - for i := uint64(0); i < largeNumChains; i++ { - maxConsumerIDs = append(maxConsumerIDs, datagen.GenRandomHexStr(r, 30)) - } - randomEpochNum := datagen.RandomInt(r, 10) + 1 - _, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: maxConsumerIDs}) - require.Error(t, err) - - // if no input is passed in, query should fail - _, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: nil}) - require.Error(t, err) - - // if len of chain ids is 0, query should fail - _, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: []string{}}) - require.Error(t, err) - - // if chain ids contain duplicates, query should fail - randomConsumerID := datagen.GenRandomHexStr(r, 30) - dupConsumerIds := []string{randomConsumerID, randomConsumerID} - _, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: dupConsumerIds}) - require.Error(t, err) - }) -} - -func FuzzListHeaders(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - // invoke the hook a random number of times to simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - headers, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - // a request with randomised pagination - limit := datagen.RandomInt(r, int(numHeaders)) + 1 - req := &zctypes.QueryListHeadersRequest{ - ConsumerId: consumerID, - Pagination: &query.PageRequest{ - Limit: limit, - }, - } - resp, err := zcKeeper.ListHeaders(ctx, req) - require.NoError(t, err) - require.Equal(t, int(limit), len(resp.Headers)) - for i := uint64(0); i < limit; i++ { - require.Equal(t, headers[i].Header.AppHash, resp.Headers[i].Hash) - } - }) -} - -func FuzzListEpochHeaders(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - epochingKeeper := babylonApp.EpochingKeeper - ctx := babylonApp.NewContext(false) - consumerID := "test-consumerid" - - hooks := zcKeeper.Hooks() - - numReqs := datagen.RandomInt(r, 5) + 1 - - epochNumList := []uint64{datagen.RandomInt(r, 10) + 1} - nextHeightList := []uint64{0} - numHeadersList := []uint64{} - expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} - numForkHeadersList := []uint64{} - - // we test the scenario of ending an epoch for multiple times, in order to ensure that - // consecutive epoch infos do not affect each other. 
- for i := uint64(0); i < numReqs; i++ { - epochNum := epochNumList[i] - // enter a random epoch - if i == 0 { - for j := uint64(1); j < epochNum; j++ { // starting from epoch 1 - epochingKeeper.IncEpoch(ctx) - } - } else { - for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { - epochingKeeper.IncEpoch(ctx) - } - } - - // generate a random number of headers and fork headers - numHeadersList = append(numHeadersList, datagen.RandomInt(r, 100)+1) - numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(r, 10)+1) - // trigger hooks to append these headers and fork headers - expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) - expectedHeadersMap[epochNum] = expectedHeaders - // prepare nextHeight for the next request - nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) - - // simulate the scenario that a random epoch has ended - hooks.AfterEpochEnds(ctx, epochNum) - // prepare epochNum for the next request - epochNumList = append(epochNumList, epochNum+datagen.RandomInt(r, 10)+1) - } - - // attest the correctness of epoch info for each tested epoch - for i := uint64(0); i < numReqs; i++ { - epochNum := epochNumList[i] - // make request - req := &zctypes.QueryListEpochHeadersRequest{ - ConsumerId: consumerID, - EpochNum: epochNum, - } - resp, err := zcKeeper.ListEpochHeaders(ctx, req) - require.NoError(t, err) - - // check if the headers are same as expected - headers := resp.Headers - require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) - for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { - require.Equal(t, expectedHeadersMap[epochNum][j].Header.AppHash, headers[j].Hash) - } - } - }) -} - -func FuzzFinalizedChainInfo(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // simulate the scenario that a random epoch has ended and finalised - epoch := datagen.GenRandomEpoch(r) - - // mock checkpointing keeper - // TODO: tests with a set of validators - checkpointingKeeper := zctypes.NewMockCheckpointingKeeper(ctrl) - checkpointingKeeper.EXPECT().GetBLSPubKeySet(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return([]*checkpointingtypes.ValidatorWithBlsKey{}, nil).AnyTimes() - // mock btccheckpoint keeper - // TODO: test with BTCSpvProofs - randomRawCkpt := datagen.GenRandomRawCheckpoint(r) - randomRawCkpt.EpochNum = epoch.EpochNumber - btccKeeper := zctypes.NewMockBtcCheckpointKeeper(ctrl) - checkpointingKeeper.EXPECT().GetRawCheckpoint(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( - &checkpointingtypes.RawCheckpointWithMeta{ - Ckpt: randomRawCkpt, - }, nil, - ).AnyTimes() - btccKeeper.EXPECT().GetParams(gomock.Any()).Return(btcctypes.DefaultParams()).AnyTimes() - btccKeeper.EXPECT().GetBestSubmission(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( - btcctypes.Finalized, - &btcctypes.SubmissionKey{ - Key: []*btcctypes.TransactionKey{}, - }, - nil, - ).AnyTimes() - mockSubmissionData := &btcctypes.SubmissionData{TxsInfo: []*btcctypes.TransactionInfo{}} - btccKeeper.EXPECT().GetSubmissionData(gomock.Any(), gomock.Any()).Return(mockSubmissionData).AnyTimes() - // mock epoching keeper - epochingKeeper := zctypes.NewMockEpochingKeeper(ctrl) - epochingKeeper.EXPECT().GetEpoch(gomock.Any()).Return(epoch).AnyTimes() - epochingKeeper.EXPECT().GetHistoricalEpoch(gomock.Any(), 
gomock.Eq(epoch.EpochNumber)).Return(epoch, nil).AnyTimes() - // mock btclc keeper - btclcKeeper := zctypes.NewMockBTCLightClientKeeper(ctrl) - mockBTCHeaderInfo := datagen.GenRandomBTCHeaderInfo(r) - btclcKeeper.EXPECT().GetMainChainFrom(gomock.Any(), gomock.Any()).Return([]*btclightclienttypes.BTCHeaderInfo{mockBTCHeaderInfo}).AnyTimes() - btclcKeeper.EXPECT().GetTipInfo(gomock.Any()).Return(mockBTCHeaderInfo).AnyTimes() - - zcKeeper, ctx := testkeeper.ZoneConciergeKeeper(t, btclcKeeper, checkpointingKeeper, btccKeeper, epochingKeeper) - hooks := zcKeeper.Hooks() - - var ( - chainsInfo []chainInfo - consumerIDs []string - ) - numChains := datagen.RandomInt(r, 100) + 1 - for i := uint64(0); i < numChains; i++ { - consumerIDLen := datagen.RandomInt(r, 40) + 10 - consumerID := string(datagen.GenRandomByteArray(r, consumerIDLen)) - - // invoke the hook a random number of times to simulate a random number of blocks - numHeaders := datagen.RandomInt(r, 100) + 1 - numForkHeaders := datagen.RandomInt(r, 10) + 1 - SimulateNewHeadersAndForks(ctx, r, zcKeeper, consumerID, 0, numHeaders, numForkHeaders) - - consumerIDs = append(consumerIDs, consumerID) - chainsInfo = append(chainsInfo, chainInfo{ - consumerID: consumerID, - numHeaders: numHeaders, - numForkHeaders: numForkHeaders, - }) - } - - hooks.AfterEpochEnds(ctx, epoch.EpochNumber) - err := hooks.AfterRawCheckpointFinalized(ctx, epoch.EpochNumber) - require.NoError(t, err) - checkpointingKeeper.EXPECT().GetLastFinalizedEpoch(gomock.Any()).Return(epoch.EpochNumber).AnyTimes() - - // check if the chain info of this epoch is recorded or not - resp, err := zcKeeper.FinalizedChainsInfo(ctx, &zctypes.QueryFinalizedChainsInfoRequest{ConsumerIds: consumerIDs, Prove: true}) - require.NoError(t, err) - for i, respData := range resp.FinalizedChainsInfo { - require.Equal(t, chainsInfo[i].consumerID, respData.FinalizedChainInfo.ConsumerId) - require.Equal(t, chainsInfo[i].numHeaders-1, respData.FinalizedChainInfo.LatestHeader.Height) - require.Equal(t, chainsInfo[i].numForkHeaders, uint64(len(respData.FinalizedChainInfo.LatestForks.Headers))) - } - }) -} diff --git a/x/zoneconcierge/keeper/header_handler.go b/x/zoneconcierge/keeper/header_handler.go deleted file mode 100644 index 41266432..00000000 --- a/x/zoneconcierge/keeper/header_handler.go +++ /dev/null @@ -1,73 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// HandleHeaderWithValidCommit handles a CZ header with a valid QC -func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte, header *types.HeaderInfo, isOnFork bool) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - babylonHeader := sdkCtx.HeaderInfo() - indexedHeader := types.IndexedHeader{ - ConsumerId: header.ClientId, - Hash: header.AppHash, - Height: header.Height, - Time: &header.Time, - BabylonHeaderHash: babylonHeader.AppHash, - BabylonHeaderHeight: uint64(babylonHeader.Height), - BabylonEpoch: k.GetEpoch(ctx).EpochNumber, - BabylonTxHash: txHash, - } - - k.Logger(sdkCtx).Debug("found new IBC header", "header", indexedHeader) - - var ( - chainInfo *types.ChainInfo - err error - ) - if !k.HasChainInfo(ctx, indexedHeader.ConsumerId) { - // chain info does not exist yet, initialise chain info for this chain - chainInfo, err = k.InitChainInfo(ctx, indexedHeader.ConsumerId) - if err != nil { - panic(fmt.Errorf("failed to initialize chain info of %s: %w", indexedHeader.ConsumerId, err)) - } - } else { - // get 
chain info - chainInfo, err = k.GetChainInfo(ctx, indexedHeader.ConsumerId) - if err != nil { - panic(fmt.Errorf("failed to get chain info of %s: %w", indexedHeader.ConsumerId, err)) - } - } - - if isOnFork { - // insert header to fork index - if err := k.insertForkHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil { - panic(err) - } - // update the latest fork in chain info - if err := k.tryToUpdateLatestForkHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil { - panic(err) - } - } else { - // ensure the header is the latest one, otherwise ignore it - // NOTE: while an old header is considered acceptable in IBC-Go (see Case_valid_past_update), but - // ZoneConcierge should not checkpoint it since Babylon requires monotonic checkpointing - if !chainInfo.IsLatestHeader(&indexedHeader) { - return - } - - // insert header to canonical chain index - if err := k.insertHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil { - panic(err) - } - // update the latest canonical header in chain info - if err := k.updateLatestHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil { - panic(err) - } - } -} diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go deleted file mode 100644 index f8d4c503..00000000 --- a/x/zoneconcierge/keeper/hooks.go +++ /dev/null @@ -1,65 +0,0 @@ -package keeper - -import ( - "context" - - sdk "github.com/cosmos/cosmos-sdk/types" - - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -type Hooks struct { - k Keeper -} - -// ensures Hooks implements ClientHooks interfaces -var _ checkpointingtypes.CheckpointingHooks = Hooks{} -var _ epochingtypes.EpochingHooks = Hooks{} - -func (k Keeper) Hooks() Hooks { return Hooks{k} } - -func (h Hooks) AfterEpochEnds(ctx context.Context, epoch uint64) { - // upon an epoch has ended, index the current chain info for each CZ - // TODO: do this together when epoch is sealed? 
- for _, consumerID := range h.k.GetAllConsumerIDs(ctx) { - h.k.recordEpochChainInfo(ctx, consumerID, epoch) - } -} - -func (h Hooks) AfterRawCheckpointSealed(ctx context.Context, epoch uint64) error { - // upon a raw checkpoint is sealed, index the current chain info for each consumer, - // and generate/save the proof that the epoch is sealed - h.k.recordEpochChainInfoProofs(ctx, epoch) - h.k.recordSealedEpochProof(ctx, epoch) - return nil -} - -// AfterRawCheckpointFinalized is triggered upon an epoch has been finalised -func (h Hooks) AfterRawCheckpointFinalized(ctx context.Context, epoch uint64) error { - headersToBroadcast := h.k.getHeadersToBroadcast(ctx) - - // send BTC timestamp to all open channels with ZoneConcierge - h.k.BroadcastBTCTimestamps(ctx, epoch, headersToBroadcast) - - // Update the last broadcasted segment - h.k.setLastSentSegment(ctx, &types.BTCChainSegment{ - BtcHeaders: headersToBroadcast, - }) - return nil -} - -// Other unused hooks - -func (h Hooks) AfterBlsKeyRegistered(ctx context.Context, valAddr sdk.ValAddress) error { return nil } -func (h Hooks) AfterRawCheckpointConfirmed(ctx context.Context, epoch uint64) error { return nil } -func (h Hooks) AfterRawCheckpointForgotten(ctx context.Context, ckpt *checkpointingtypes.RawCheckpoint) error { - return nil -} -func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx context.Context, ckpt *checkpointingtypes.RawCheckpoint) error { - return nil -} - -func (h Hooks) AfterEpochBegins(ctx context.Context, epoch uint64) {} -func (h Hooks) BeforeSlashThreshold(ctx context.Context, valSet epochingtypes.ValidatorSet) {} diff --git a/x/zoneconcierge/keeper/ibc_channels.go b/x/zoneconcierge/keeper/ibc_channels.go deleted file mode 100644 index d1ad90dc..00000000 --- a/x/zoneconcierge/keeper/ibc_channels.go +++ /dev/null @@ -1,41 +0,0 @@ -package keeper - -import ( - "context" - sdk "github.com/cosmos/cosmos-sdk/types" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" -) - -func (k Keeper) GetAllChannels(ctx context.Context) []channeltypes.IdentifiedChannel { - return k.channelKeeper.GetAllChannels(sdk.UnwrapSDKContext(ctx)) -} - -// GetAllOpenZCChannels returns all open channels that are connected to ZoneConcierge's port -func (k Keeper) GetAllOpenZCChannels(ctx context.Context) []channeltypes.IdentifiedChannel { - zcPort := k.GetPort(ctx) - channels := k.GetAllChannels(ctx) - - openZCChannels := []channeltypes.IdentifiedChannel{} - for _, channel := range channels { - if channel.State != channeltypes.OPEN { - continue - } - if channel.PortId != zcPort { - continue - } - openZCChannels = append(openZCChannels, channel) - } - - return openZCChannels -} - -// isChannelUninitialized checks whether the channel is not initilialised yet -// it's done by checking whether the packet sequence number is 1 (the first sequence number) or not -func (k Keeper) isChannelUninitialized(ctx context.Context, channel channeltypes.IdentifiedChannel) bool { - sdkCtx := sdk.UnwrapSDKContext(ctx) - portID := channel.PortId - channelID := channel.ChannelId - // NOTE: channeltypes.IdentifiedChannel object is guaranteed to exist, so guaranteed to be found - nextSeqSend, _ := k.channelKeeper.GetNextSequenceSend(sdkCtx, portID, channelID) - return nextSeqSend == 1 -} diff --git a/x/zoneconcierge/keeper/ibc_header_decorator.go b/x/zoneconcierge/keeper/ibc_header_decorator.go deleted file mode 100644 index 930f33e6..00000000 --- a/x/zoneconcierge/keeper/ibc_header_decorator.go +++ /dev/null @@ -1,105 +0,0 @@ -package keeper - 
-import ( - "github.com/cometbft/cometbft/crypto/tmhash" - sdk "github.com/cosmos/cosmos-sdk/types" - clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck - ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -var _ sdk.PostDecorator = &IBCHeaderDecorator{} - -type IBCHeaderDecorator struct { - k Keeper -} - -// NewIBCHeaderDecorator creates a new IBCHeaderDecorator -func NewIBCHeaderDecorator(k Keeper) *IBCHeaderDecorator { - return &IBCHeaderDecorator{ - k: k, - } -} - -func (d *IBCHeaderDecorator) getHeaderAndClientState(ctx sdk.Context, m sdk.Msg) (*types.HeaderInfo, *ibctmtypes.ClientState) { - // ensure the message is MsgUpdateClient - msgUpdateClient, ok := m.(*clienttypes.MsgUpdateClient) - if !ok { - return nil, nil - } - // unpack ClientMsg inside MsgUpdateClient - clientMsg, err := clienttypes.UnpackClientMessage(msgUpdateClient.ClientMessage) - if err != nil { - return nil, nil - } - // ensure the ClientMsg is a Comet header - ibctmHeader, ok := clientMsg.(*ibctmtypes.Header) - if !ok { - return nil, nil - } - - // all good, we get the headerInfo - headerInfo := &types.HeaderInfo{ - ClientId: msgUpdateClient.ClientId, - ChainId: ibctmHeader.Header.ChainID, - AppHash: ibctmHeader.Header.AppHash, - Height: uint64(ibctmHeader.Header.Height), - Time: ibctmHeader.Header.Time, - } - - // ensure the corresponding clientState exists - clientState, exist := d.k.clientKeeper.GetClientState(ctx, msgUpdateClient.ClientId) - if !exist { - return nil, nil - } - // ensure the clientState is a Comet clientState - cmtClientState, ok := clientState.(*ibctmtypes.ClientState) - if !ok { - return nil, nil - } - - return headerInfo, cmtClientState -} - -func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, success bool, next sdk.PostHandler) (sdk.Context, error) { - // only do this when finalizing a block or simulating the current tx - if ctx.ExecMode() != sdk.ExecModeFinalize && !simulate { - return next(ctx, tx, simulate, success) - } - // ignore unsuccessful tx - // NOTE: tx with a misbehaving header will still succeed, but will make the client to be frozen - if !success { - return next(ctx, tx, simulate, success) - } - - // calculate tx hash - txHash := tmhash.Sum(ctx.TxBytes()) - - for _, msg := range tx.GetMsgs() { - // try to extract the headerInfo and the client's status - headerInfo, clientState := d.getHeaderAndClientState(ctx, msg) - if headerInfo == nil { - continue - } - - // FrozenHeight is non-zero -> client is frozen -> this is a fork header - // NOTE: A valid tx can ONLY have a single fork header msg, and this fork - // header msg can ONLY be the LAST msg in this tx. If there is a fork - // header before a canonical header in a tx, then the client will be - // frozen upon the fork header, and the subsequent canonical header will - // fail, eventually failing the entire tx. All state updates due to this - // failed tx will be rolled back. 
- isOnFork := !clientState.FrozenHeight.IsZero() - d.k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, isOnFork) - - // unfreeze client (by setting FrozenHeight to zero again) if the client is frozen - // due to a fork header - if isOnFork { - clientState.FrozenHeight = clienttypes.ZeroHeight() - d.k.clientKeeper.SetClientState(ctx, headerInfo.ClientId, clientState) - } - } - - return next(ctx, tx, simulate, success) -} diff --git a/x/zoneconcierge/keeper/ibc_packet.go b/x/zoneconcierge/keeper/ibc_packet.go deleted file mode 100644 index c4f6bbc0..00000000 --- a/x/zoneconcierge/keeper/ibc_packet.go +++ /dev/null @@ -1,74 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - "time" - - sdk "github.com/cosmos/cosmos-sdk/types" - - errorsmod "cosmossdk.io/errors" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/cosmos/cosmos-sdk/telemetry" - clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - host "github.com/cosmos/ibc-go/v8/modules/core/24-host" - coretypes "github.com/cosmos/ibc-go/v8/modules/core/types" - "github.com/hashicorp/go-metrics" -) - -// SendIBCPacket sends an IBC packet to a channel -// (adapted from https://github.com/cosmos/ibc-go/blob/v5.0.0/modules/apps/transfer/keeper/relay.go) -func (k Keeper) SendIBCPacket(ctx context.Context, channel channeltypes.IdentifiedChannel, packetData *types.ZoneconciergePacketData) error { - sdkCtx := sdk.UnwrapSDKContext(ctx) - // get src/dst ports and channels - sourcePort := channel.PortId - sourceChannel := channel.ChannelId - destinationPort := channel.Counterparty.GetPortID() - destinationChannel := channel.Counterparty.GetChannelID() - - // begin createOutgoingPacket logic - // See spec for this logic: https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer#packet-relay - channelCap, ok := k.scopedKeeper.GetCapability(sdkCtx, host.ChannelCapabilityPath(sourcePort, sourceChannel)) - if !ok { - return errorsmod.Wrapf(channeltypes.ErrChannelCapabilityNotFound, "module does not own channel capability: sourcePort: %s, sourceChannel: %s", sourcePort, sourceChannel) - } - - // timeout - timeoutPeriod := time.Duration(k.GetParams(sdkCtx).IbcPacketTimeoutSeconds) * time.Second - timeoutTime := uint64(sdkCtx.HeaderInfo().Time.Add(timeoutPeriod).UnixNano()) - zeroheight := clienttypes.ZeroHeight() - - seq, err := k.ics4Wrapper.SendPacket( - sdkCtx, - channelCap, - sourcePort, - sourceChannel, - zeroheight, // no need to set timeout height if timeout timestamp is set - timeoutTime, // if the packet is not relayed after this time, then the packet will be time out - k.cdc.MustMarshal(packetData), - ) - - // send packet - if err != nil { - // Failed/timeout packet should not make the system crash - k.Logger(sdkCtx).Error(fmt.Sprintf("failed to send IBC packet (sequence number: %d) to channel %v port %s: %v", seq, destinationChannel, destinationPort, err)) - } else { - k.Logger(sdkCtx).Info(fmt.Sprintf("successfully sent IBC packet (sequence number: %d) to channel %v port %s", seq, destinationChannel, destinationPort)) - } - - // metrics stuff - labels := []metrics.Label{ - telemetry.NewLabel(coretypes.LabelDestinationPort, destinationPort), - telemetry.NewLabel(coretypes.LabelDestinationChannel, destinationChannel), - } - defer func() { - telemetry.IncrCounterWithLabels( - []string{"ibc", types.ModuleName, "send"}, - 1, - labels, - ) - }() - - return nil -} diff --git 
a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go deleted file mode 100644 index 44705197..00000000 --- a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go +++ /dev/null @@ -1,253 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - - bbn "github.com/babylonlabs-io/babylon/types" - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - btclctypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// finalizedInfo is a private struct that stores metadata and proofs -// identical to all BTC timestamps in the same epoch -type finalizedInfo struct { - EpochInfo *epochingtypes.Epoch - RawCheckpoint *checkpointingtypes.RawCheckpoint - BTCSubmissionKey *btcctypes.SubmissionKey - ProofEpochSealed *types.ProofEpochSealed - ProofEpochSubmitted []*btcctypes.TransactionInfo - BTCHeaders []*btclctypes.BTCHeaderInfo -} - -// getClientID gets the ID of the IBC client under the given channel -// We will use the client ID as the consumer ID to uniquely identify -// the consumer chain -func (k Keeper) getClientID(ctx context.Context, channel channeltypes.IdentifiedChannel) (string, error) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - clientID, _, err := k.channelKeeper.GetChannelClientState(sdkCtx, channel.PortId, channel.ChannelId) - if err != nil { - return "", err - } - return clientID, nil -} - -// getFinalizedInfo returns metadata and proofs that are identical to all BTC timestamps in the same epoch -func (k Keeper) getFinalizedInfo( - ctx context.Context, - epochNum uint64, - headersToBroadcast []*btclctypes.BTCHeaderInfo, -) (*finalizedInfo, error) { - finalizedEpochInfo, err := k.epochingKeeper.GetHistoricalEpoch(ctx, epochNum) - if err != nil { - return nil, err - } - - // get proof that the epoch is sealed - proofEpochSealed := k.getSealedEpochProof(ctx, epochNum) - if proofEpochSealed == nil { - panic(err) // only programming error - } - - // assign raw checkpoint - rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, epochNum) - if err != nil { - return nil, err - } - - // assign BTC submission key - ed := k.btccKeeper.GetEpochData(ctx, epochNum) - bestSubmissionBtcInfo := k.btccKeeper.GetEpochBestSubmissionBtcInfo(ctx, ed) - if bestSubmissionBtcInfo == nil { - return nil, fmt.Errorf("empty bestSubmissionBtcInfo") - } - btcSubmissionKey := &bestSubmissionBtcInfo.SubmissionKey - - // proof that the epoch's checkpoint is submitted to BTC - // i.e., the two `TransactionInfo`s for the checkpoint - proofEpochSubmitted, err := k.ProveEpochSubmitted(ctx, btcSubmissionKey) - if err != nil { - return nil, err - } - - // construct finalizedInfo - finalizedInfo := &finalizedInfo{ - EpochInfo: finalizedEpochInfo, - RawCheckpoint: rawCheckpoint.Ckpt, - BTCSubmissionKey: btcSubmissionKey, - ProofEpochSealed: proofEpochSealed, - ProofEpochSubmitted: proofEpochSubmitted, - BTCHeaders: headersToBroadcast, - } - - return finalizedInfo, nil -} - -// createBTCTimestamp creates a BTC timestamp from finalizedInfo for a given IBC channel -// where the counterparty is a Cosmos zone -func (k Keeper) createBTCTimestamp( - ctx context.Context, - consumerID string, - channel 
channeltypes.IdentifiedChannel, - finalizedInfo *finalizedInfo, -) (*types.BTCTimestamp, error) { - // if the Babylon contract in this channel has not been initialised, get headers from - // the tip to (w+1+len(finalizedInfo.BTCHeaders))-deep header - var btcHeaders []*btclctypes.BTCHeaderInfo - if k.isChannelUninitialized(ctx, channel) { - w := k.btccKeeper.GetParams(ctx).CheckpointFinalizationTimeout - depth := w + 1 + uint64(len(finalizedInfo.BTCHeaders)) - - btcHeaders = k.btclcKeeper.GetMainChainUpTo(ctx, depth) - if btcHeaders == nil { - return nil, fmt.Errorf("failed to get Bitcoin main chain up to depth %d", depth) - } - bbn.Reverse(btcHeaders) - } else { - btcHeaders = finalizedInfo.BTCHeaders - } - - // get finalised chainInfo - // NOTE: it's possible that this chain does not have chain info at the moment - // In this case, skip sending BTC timestamp for this chain at this epoch - epochNum := finalizedInfo.EpochInfo.EpochNumber - epochChainInfo, err := k.GetEpochChainInfo(ctx, consumerID, epochNum) - if err != nil { - return nil, fmt.Errorf("no epochChainInfo for chain %s at epoch %d", consumerID, epochNum) - } - - // construct BTC timestamp from everything - // NOTE: it's possible that there is no header checkpointed in this epoch - btcTimestamp := &types.BTCTimestamp{ - Header: nil, - BtcHeaders: btcHeaders, - EpochInfo: finalizedInfo.EpochInfo, - RawCheckpoint: finalizedInfo.RawCheckpoint, - BtcSubmissionKey: finalizedInfo.BTCSubmissionKey, - Proof: &types.ProofFinalizedChainInfo{ - ProofCzHeaderInEpoch: nil, - ProofEpochSealed: finalizedInfo.ProofEpochSealed, - ProofEpochSubmitted: finalizedInfo.ProofEpochSubmitted, - }, - } - - // if there is a CZ header checkpointed in this finalised epoch, - // add this CZ header and corresponding proofs to the BTC timestamp - epochOfHeader := epochChainInfo.ChainInfo.LatestHeader.BabylonEpoch - if epochOfHeader == epochNum { - btcTimestamp.Header = epochChainInfo.ChainInfo.LatestHeader - btcTimestamp.Proof.ProofCzHeaderInEpoch = epochChainInfo.ProofHeaderInEpoch - } - - return btcTimestamp, nil -} - -// getDeepEnoughBTCHeaders returns the last w+1 BTC headers, in which the 1st BTC header -// must be in the canonical chain assuming w-long reorg will never happen -// This function will only be triggered upon a finalised epoch, where w-deep BTC checkpoint -// is guaranteed. Thus the function is safe to be called upon generating BTC timestamps -func (k Keeper) getDeepEnoughBTCHeaders(ctx context.Context) []*btclctypes.BTCHeaderInfo { - wValue := k.btccKeeper.GetParams(ctx).CheckpointFinalizationTimeout - startHeight := k.btclcKeeper.GetTipInfo(ctx).Height - wValue - return k.btclcKeeper.GetMainChainFrom(ctx, startHeight) -} - -// getHeadersToBroadcast retrieves headers to be broadcasted to all open IBC channels to ZoneConcierge -// The header to be broadcasted are: -// - either the whole known chain if we did not broadcast any headers yet -// - headers from the child of the most recent header we sent which is still in the main chain up to the current tip -func (k Keeper) getHeadersToBroadcast(ctx context.Context) []*btclctypes.BTCHeaderInfo { - - lastSegment := k.GetLastSentSegment(ctx) - - if lastSegment == nil { - // we did not send any headers yet, so we need to send the last w+1 BTC headers - // where w+1 is imposed by Babylon contract. 
This ensures that the first BTC header - // in Babylon contract will be w-deep - return k.getDeepEnoughBTCHeaders(ctx) - } - - // we already sent some headers, so we need to send headers from the child of the most recent header we sent - // which is still in the main chain. - // In most cases it will be header just after the tip, but in case of the forks it may as well be some older header - // of the segment - var initHeader *btclctypes.BTCHeaderInfo - for i := len(lastSegment.BtcHeaders) - 1; i >= 0; i-- { - header := lastSegment.BtcHeaders[i] - if k.btclcKeeper.GetHeaderByHash(ctx, header.Hash) != nil { - initHeader = header - break - } - } - - if initHeader == nil { - // if initHeader is nil, then this means a reorg happens such that all headers - // in the last segment are reverted. In this case, send the last w+1 BTC headers - return k.getDeepEnoughBTCHeaders(ctx) - } - - headersToSend := k.btclcKeeper.GetMainChainFrom(ctx, initHeader.Height+1) - - return headersToSend -} - -// BroadcastBTCTimestamps sends an IBC packet of BTC timestamp to all open IBC channels to ZoneConcierge -func (k Keeper) BroadcastBTCTimestamps( - ctx context.Context, - epochNum uint64, - headersToBroadcast []*btclctypes.BTCHeaderInfo, -) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - // Babylon does not broadcast BTC timestamps until finalising epoch 1 - if epochNum < 1 { - k.Logger(sdkCtx).Info("Babylon does not finalize epoch 1 yet, skip broadcasting BTC timestamps") - return - } - - // get all channels that are open and are connected to ZoneConcierge's port - openZCChannels := k.GetAllOpenZCChannels(ctx) - if len(openZCChannels) == 0 { - k.Logger(sdkCtx).Info("no open IBC channel with ZoneConcierge, skip broadcasting BTC timestamps") - return - } - - k.Logger(sdkCtx).Info("there exists open IBC channels with ZoneConcierge, generating BTC timestamps", "number of channels", len(openZCChannels)) - - // get all metadata shared across BTC timestamps in the same epoch - finalizedInfo, err := k.getFinalizedInfo(ctx, epochNum, headersToBroadcast) - if err != nil { - k.Logger(sdkCtx).Error("failed to generate metadata shared across BTC timestamps in the same epoch, skip broadcasting BTC timestamps", "error", err) - return - } - - // for each channel, construct and send BTC timestamp - for _, channel := range openZCChannels { - // get the ID of the chain under this channel - consumerID, err := k.getClientID(ctx, channel) - if err != nil { - k.Logger(sdkCtx).Error("failed to get client ID, skip sending BTC timestamp for this consumer", "channelID", channel.ChannelId, "error", err) - continue - } - - // generate timestamp for this channel - btcTimestamp, err := k.createBTCTimestamp(ctx, consumerID, channel, finalizedInfo) - if err != nil { - k.Logger(sdkCtx).Error("failed to generate BTC timestamp, skip sending BTC timestamp for this chain", "consumerID", consumerID, "error", err) - continue - } - - // wrap BTC timestamp to IBC packet - packet := types.NewBTCTimestampPacketData(btcTimestamp) - // send IBC packet - if err := k.SendIBCPacket(ctx, channel, packet); err != nil { - k.Logger(sdkCtx).Error("failed to send BTC timestamp IBC packet, skip sending BTC timestamp for this chain", "consumerID", consumerID, "channelID", channel.ChannelId, "error", err) - continue - } - } -} diff --git a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp_test.go b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp_test.go deleted file mode 100644 index 014203fe..00000000 --- a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp_test.go +++ /dev/null 
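// A minimal standalone sketch, not taken from the Babylon codebase, of the selection rule documented
// in getHeadersToBroadcast above, with headers reduced to plain heights and the canonical chain to a
// height set; w stands for the checkpoint finalization timeout. If nothing was sent yet, or the whole
// previously sent segment was reorged away, the last w+1 headers ending at the tip are sent; otherwise
// only the headers after the highest previously sent header that is still canonical. The helper names
// here are hypothetical.
package main

import "fmt"

func headersToBroadcast(lastSent []uint64, canonical map[uint64]bool, tip, w uint64) []uint64 {
	var from uint64
	found := false
	// walk the last sent segment backwards to find its most recent header still on the main chain
	for i := len(lastSent) - 1; i >= 0; i-- {
		if canonical[lastSent[i]] {
			from, found = lastSent[i]+1, true
			break
		}
	}
	if !found {
		from = tip - w // the last w+1 headers: tip-w, ..., tip
	}
	out := []uint64{}
	for h := from; h <= tip; h++ {
		out = append(out, h)
	}
	return out
}

func main() {
	canonical := map[uint64]bool{}
	for h := uint64(0); h <= 110; h++ {
		canonical[h] = true
	}
	// fresh start with w=100 and tip=110: heights 10..110, i.e. 101 headers
	fmt.Println(len(headersToBroadcast(nil, canonical, 110, 100)))
	// segment up to height 105 already sent and still canonical: only 106..110 follow
	fmt.Println(headersToBroadcast([]uint64{103, 104, 105}, canonical, 110, 100))
}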
@@ -1,146 +0,0 @@ -package keeper_test - -import ( - "context" - "math/rand" - "testing" - - "github.com/babylonlabs-io/babylon/app" - "github.com/babylonlabs-io/babylon/testutil/datagen" - btclckeeper "github.com/babylonlabs-io/babylon/x/btclightclient/keeper" - btclctypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" - "github.com/stretchr/testify/require" -) - -func allFieldsEqual(a *btclctypes.BTCHeaderInfo, b *btclctypes.BTCHeaderInfo) bool { - return a.Height == b.Height && a.Hash.Eq(b.Hash) && a.Header.Eq(b.Header) && a.Work.Equal(*b.Work) -} - -// this function must not be used at difficulty adjustment boundaries, as then -// difficulty adjustment calculation will fail -func genRandomChain( - t *testing.T, - r *rand.Rand, - k *btclckeeper.Keeper, - ctx context.Context, - initialHeight uint64, - chainLength uint64, -) *datagen.BTCHeaderPartialChain { - initHeader := k.GetHeaderByHeight(ctx, initialHeight) - randomChain := datagen.NewBTCHeaderChainFromParentInfo( - r, - initHeader, - uint32(chainLength), - ) - err := k.InsertHeadersWithHookAndEvents(ctx, randomChain.ChainToBytes()) - require.NoError(t, err) - tip := k.GetTipInfo(ctx) - randomChainTipInfo := randomChain.GetTipInfo() - require.True(t, allFieldsEqual(tip, randomChainTipInfo)) - return randomChain -} - -func FuzzGetHeadersToBroadcast(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - babylonApp := app.Setup(t, false) - zcKeeper := babylonApp.ZoneConciergeKeeper - btclcKeeper := babylonApp.BTCLightClientKeeper - ctx := babylonApp.NewContext(false) - - hooks := zcKeeper.Hooks() - - // insert a random number of BTC headers to BTC light client - wValue := babylonApp.BtcCheckpointKeeper.GetParams(ctx).CheckpointFinalizationTimeout - chainLength := datagen.RandomInt(r, 10) + wValue - genRandomChain( - t, - r, - &btclcKeeper, - ctx, - 0, - chainLength, - ) - - // finalise a random epoch - epochNum := datagen.RandomInt(r, 10) - err := hooks.AfterRawCheckpointFinalized(ctx, epochNum) - require.NoError(t, err) - // current tip - btcTip := btclcKeeper.GetTipInfo(ctx) - // assert the last segment is the last w+1 BTC headers - lastSegment := zcKeeper.GetLastSentSegment(ctx) - require.Len(t, lastSegment.BtcHeaders, int(wValue)+1) - for i := range lastSegment.BtcHeaders { - require.Equal(t, btclcKeeper.GetHeaderByHeight(ctx, btcTip.Height-wValue+uint64(i)), lastSegment.BtcHeaders[i]) - } - - // finalise another epoch, during which a small number of new BTC headers are inserted - epochNum += 1 - chainLength = datagen.RandomInt(r, 10) + 1 - genRandomChain( - t, - r, - &btclcKeeper, - ctx, - btcTip.Height, - chainLength, - ) - err = hooks.AfterRawCheckpointFinalized(ctx, epochNum) - require.NoError(t, err) - // assert the last segment is since the header after the last tip - lastSegment = zcKeeper.GetLastSentSegment(ctx) - require.Len(t, lastSegment.BtcHeaders, int(chainLength)) - for i := range lastSegment.BtcHeaders { - require.Equal(t, btclcKeeper.GetHeaderByHeight(ctx, uint64(i)+btcTip.Height+1), lastSegment.BtcHeaders[i]) - } - - // remember the current tip and the segment length - btcTip = btclcKeeper.GetTipInfo(ctx) - lastSegmentLength := uint64(len(lastSegment.BtcHeaders)) - - // finalise another epoch, during which a number of new BTC headers with reorg are inserted - epochNum += 1 - // reorg at a super random point - // NOTE: it's possible that the last segment is totally reverted. 
We want to be resilient against - // this, by sending the BTC headers since the last reorg point - reorgPoint := datagen.RandomInt(r, int(btcTip.Height)) - revertedChainLength := btcTip.Height - reorgPoint - // the fork chain needs to be longer than the canonical one - forkChainLength := revertedChainLength + datagen.RandomInt(r, 10) + 1 - genRandomChain( - t, - r, - &btclcKeeper, - ctx, - reorgPoint, - forkChainLength, - ) - err = hooks.AfterRawCheckpointFinalized(ctx, epochNum) - require.NoError(t, err) - // current tip - btcTip = btclcKeeper.GetTipInfo(ctx) - // assert the last segment is the last w+1 BTC headers - lastSegment = zcKeeper.GetLastSentSegment(ctx) - if revertedChainLength >= lastSegmentLength { - // the entire last segment is reverted, the last w+1 BTC headers should be sent - require.Len(t, lastSegment.BtcHeaders, int(wValue)+1) - // assert the consistency of w+1 sent BTC headers - for i := range lastSegment.BtcHeaders { - expectedHeight := btcTip.Height - wValue + uint64(i) - require.Equal(t, btclcKeeper.GetHeaderByHeight(ctx, expectedHeight), lastSegment.BtcHeaders[i]) - } - } else { - // only a subset headers of last segment are reverted, only the new fork should be sent - require.Len(t, lastSegment.BtcHeaders, int(forkChainLength)) - // assert the consistency of the sent fork BTC headers - for i := range lastSegment.BtcHeaders { - expectedHeight := btcTip.Height - forkChainLength + 1 + uint64(i) - require.Equal(t, btclcKeeper.GetHeaderByHeight(ctx, expectedHeight), lastSegment.BtcHeaders[i]) - } - } - }) -} diff --git a/x/zoneconcierge/keeper/keeper.go b/x/zoneconcierge/keeper/keeper.go deleted file mode 100644 index 82fee2b4..00000000 --- a/x/zoneconcierge/keeper/keeper.go +++ /dev/null @@ -1,121 +0,0 @@ -package keeper - -import ( - "context" - - corestoretypes "cosmossdk.io/core/store" - "cosmossdk.io/log" - storetypes "cosmossdk.io/store/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - host "github.com/cosmos/ibc-go/v8/modules/core/24-host" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" -) - -type ( - Keeper struct { - cdc codec.BinaryCodec - storeService corestoretypes.KVStoreService - - ics4Wrapper types.ICS4Wrapper - clientKeeper types.ClientKeeper - channelKeeper types.ChannelKeeper - portKeeper types.PortKeeper - authKeeper types.AccountKeeper - bankKeeper types.BankKeeper - btclcKeeper types.BTCLightClientKeeper - checkpointingKeeper types.CheckpointingKeeper - btccKeeper types.BtcCheckpointKeeper - epochingKeeper types.EpochingKeeper - storeQuerier storetypes.Queryable - scopedKeeper types.ScopedKeeper - // the address capable of executing a MsgUpdateParams message. Typically, this - // should be the x/gov module account. 
- authority string - } -) - -func NewKeeper( - cdc codec.BinaryCodec, - storeService corestoretypes.KVStoreService, - ics4Wrapper types.ICS4Wrapper, - clientKeeper types.ClientKeeper, - channelKeeper types.ChannelKeeper, - portKeeper types.PortKeeper, - authKeeper types.AccountKeeper, - bankKeeper types.BankKeeper, - btclcKeeper types.BTCLightClientKeeper, - checkpointingKeeper types.CheckpointingKeeper, - btccKeeper types.BtcCheckpointKeeper, - epochingKeeper types.EpochingKeeper, - storeQuerier storetypes.Queryable, - scopedKeeper types.ScopedKeeper, - authority string, -) *Keeper { - return &Keeper{ - cdc: cdc, - storeService: storeService, - ics4Wrapper: ics4Wrapper, - clientKeeper: clientKeeper, - channelKeeper: channelKeeper, - portKeeper: portKeeper, - authKeeper: authKeeper, - bankKeeper: bankKeeper, - btclcKeeper: btclcKeeper, - checkpointingKeeper: checkpointingKeeper, - btccKeeper: btccKeeper, - epochingKeeper: epochingKeeper, - storeQuerier: storeQuerier, - scopedKeeper: scopedKeeper, - authority: authority, - } -} - -// Logger returns a module-specific logger. -func (k Keeper) Logger(ctx sdk.Context) log.Logger { - return ctx.Logger().With("module", "x/"+ibcexported.ModuleName+"-"+types.ModuleName) -} - -// IsBound checks if the transfer module is already bound to the desired port -func (k Keeper) IsBound(ctx sdk.Context, portID string) bool { - _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) - return ok -} - -// BindPort defines a wrapper function for the ort Keeper's function in -// order to expose it to module's InitGenesis function -func (k Keeper) BindPort(ctx sdk.Context, portID string) error { - cap := k.portKeeper.BindPort(ctx, portID) - return k.ClaimCapability(ctx, cap, host.PortPath(portID)) -} - -// GetPort returns the portID for the transfer module. Used in ExportGenesis -func (k Keeper) GetPort(ctx context.Context) string { - store := k.storeService.OpenKVStore(ctx) - port, err := store.Get(types.PortKey) - if err != nil { - panic(err) - } - return string(port) -} - -// SetPort sets the portID for the transfer module. 
Used in InitGenesis -func (k Keeper) SetPort(ctx context.Context, portID string) { - store := k.storeService.OpenKVStore(ctx) - if err := store.Set(types.PortKey, []byte(portID)); err != nil { - panic(err) - } -} - -// AuthenticateCapability wraps the scopedKeeper's AuthenticateCapability function -func (k Keeper) AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) bool { - return k.scopedKeeper.AuthenticateCapability(ctx, cap, name) -} - -// ClaimCapability allows the transfer module that can claim a capability that IBC module -// passes to it -func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error { - return k.scopedKeeper.ClaimCapability(ctx, cap, name) -} diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go deleted file mode 100644 index a552219b..00000000 --- a/x/zoneconcierge/keeper/keeper_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package keeper_test - -import ( - "context" - "math/rand" - - ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint" - - "github.com/babylonlabs-io/babylon/testutil/datagen" - zckeeper "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" -) - -// SimulateNewHeaders generates a non-zero number of canonical headers -func SimulateNewHeaders(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, consumerID string, startHeight uint64, numHeaders uint64) []*ibctmtypes.Header { - headers := []*ibctmtypes.Header{} - // invoke the hook a number of times to simulate a number of blocks - for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(r, startHeight+i) - k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false) - headers = append(headers, header) - } - return headers -} - -// SimulateNewHeadersAndForks generates a random non-zero number of canonical headers and fork headers -func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, consumerID string, startHeight uint64, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) { - headers := []*ibctmtypes.Header{} - // invoke the hook a number of times to simulate a number of blocks - for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(r, startHeight+i) - k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false) - headers = append(headers, header) - } - - // generate a number of fork headers - forkHeaders := []*ibctmtypes.Header{} - for i := uint64(0); i < numForkHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(r, startHeight+numHeaders-1) - k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), true) - forkHeaders = append(forkHeaders, header) - } - return headers, forkHeaders -} diff --git a/x/zoneconcierge/keeper/msg_server.go b/x/zoneconcierge/keeper/msg_server.go deleted file mode 100644 index 26d5e9c9..00000000 --- a/x/zoneconcierge/keeper/msg_server.go +++ /dev/null @@ -1,39 +0,0 @@ -package keeper - -import ( - "context" - - errorsmod "cosmossdk.io/errors" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" -) - -type msgServer struct { - Keeper -} - -// NewMsgServerImpl returns an implementation of the MsgServer interface -// for the provided Keeper. 
-func NewMsgServerImpl(keeper Keeper) types.MsgServer { - return &msgServer{Keeper: keeper} -} - -var _ types.MsgServer = msgServer{} - -// UpdateParams updates the params -func (ms msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { - if ms.authority != req.Authority { - return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", ms.authority, req.Authority) - } - if err := req.Params.Validate(); err != nil { - return nil, govtypes.ErrInvalidProposalMsg.Wrapf("invalid parameter: %v", err) - } - - ctx := sdk.UnwrapSDKContext(goCtx) - if err := ms.SetParams(ctx, req.Params); err != nil { - return nil, err - } - - return &types.MsgUpdateParamsResponse{}, nil -} diff --git a/x/zoneconcierge/keeper/msg_server_test.go b/x/zoneconcierge/keeper/msg_server_test.go deleted file mode 100644 index 94292649..00000000 --- a/x/zoneconcierge/keeper/msg_server_test.go +++ /dev/null @@ -1 +0,0 @@ -package keeper_test diff --git a/x/zoneconcierge/keeper/params.go b/x/zoneconcierge/keeper/params.go deleted file mode 100644 index f9661e57..00000000 --- a/x/zoneconcierge/keeper/params.go +++ /dev/null @@ -1,36 +0,0 @@ -package keeper - -import ( - "context" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -// SetParams sets the x/zoneconcierge module parameters. -func (k Keeper) SetParams(ctx context.Context, p types.Params) error { - if err := p.Validate(); err != nil { - return err - } - - store := k.storeService.OpenKVStore(ctx) - bz := k.cdc.MustMarshal(&p) - if err := store.Set(types.ParamsKey, bz); err != nil { - panic(err) - } - - return nil -} - -// GetParams returns the current x/zoneconcierge module parameters. -func (k Keeper) GetParams(ctx context.Context) (p types.Params) { - store := k.storeService.OpenKVStore(ctx) - bz, err := store.Get(types.ParamsKey) - if err != nil { - panic(err) - } - if bz == nil { - return p - } - - k.cdc.MustUnmarshal(bz, &p) - return p -} diff --git a/x/zoneconcierge/keeper/params_test.go b/x/zoneconcierge/keeper/params_test.go deleted file mode 100644 index a35f8116..00000000 --- a/x/zoneconcierge/keeper/params_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package keeper_test - -import ( - "testing" - - testkeeper "github.com/babylonlabs-io/babylon/testutil/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/stretchr/testify/require" -) - -func TestGetParams(t *testing.T) { - k, ctx := testkeeper.ZoneConciergeKeeper(t, nil, nil, nil, nil) - params := types.DefaultParams() - - if err := k.SetParams(ctx, params); err != nil { - panic(err) - } - - require.EqualValues(t, params, k.GetParams(ctx)) -} diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp.go b/x/zoneconcierge/keeper/proof_btc_timestamp.go deleted file mode 100644 index 5e9924ef..00000000 --- a/x/zoneconcierge/keeper/proof_btc_timestamp.go +++ /dev/null @@ -1,129 +0,0 @@ -package keeper - -import ( - "context" - "fmt" - - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -func (k Keeper) ProveCZHeaderInEpoch(_ context.Context, header *types.IndexedHeader, epoch *epochingtypes.Epoch) (*cmtcrypto.ProofOps, error) { - czHeaderKey := types.GetCZHeaderKey(header.ConsumerId, 
header.Height) - _, _, proof, err := k.QueryStore(types.StoreKey, czHeaderKey, int64(epoch.GetSealerBlockHeight())) - if err != nil { - return nil, err - } - - return proof, nil -} - -func (k Keeper) ProveEpochInfo(epoch *epochingtypes.Epoch) (*cmtcrypto.ProofOps, error) { - epochInfoKey := types.GetEpochInfoKey(epoch.EpochNumber) - _, _, proof, err := k.QueryStore(epochingtypes.StoreKey, epochInfoKey, int64(epoch.GetSealerBlockHeight())) - if err != nil { - return nil, err - } - - return proof, nil -} - -func (k Keeper) ProveValSet(epoch *epochingtypes.Epoch) (*cmtcrypto.ProofOps, error) { - valSetKey := types.GetValSetKey(epoch.EpochNumber) - _, _, proof, err := k.QueryStore(checkpointingtypes.StoreKey, valSetKey, int64(epoch.GetSealerBlockHeight())) - if err != nil { - return nil, err - } - return proof, nil -} - -// ProveEpochSealed proves an epoch has been sealed, i.e., -// - the epoch's validator set has a valid multisig over the sealer header -// - the epoch's validator set is committed to the sealer header's app_hash -// - the epoch's metadata is committed to the sealer header's app_hash -func (k Keeper) ProveEpochSealed(ctx context.Context, epochNumber uint64) (*types.ProofEpochSealed, error) { - var ( - proof = &types.ProofEpochSealed{} - err error - ) - - // get the validator set of the sealed epoch - proof.ValidatorSet, err = k.checkpointingKeeper.GetBLSPubKeySet(ctx, epochNumber) - if err != nil { - return nil, err - } - - // get sealer header and the query height - epoch, err := k.epochingKeeper.GetHistoricalEpoch(ctx, epochNumber) - if err != nil { - return nil, err - } - - // proof of inclusion for epoch metadata in sealer header - proof.ProofEpochInfo, err = k.ProveEpochInfo(epoch) - if err != nil { - return nil, err - } - - // proof of inclusion for validator set in sealer header - proof.ProofEpochValSet, err = k.ProveValSet(epoch) - if err != nil { - return nil, err - } - - return proof, nil -} - -// ProveEpochSubmitted generates proof that the epoch's checkpoint is submitted to BTC -// i.e., the two `TransactionInfo`s for the checkpoint -func (k Keeper) ProveEpochSubmitted(ctx context.Context, sk *btcctypes.SubmissionKey) ([]*btcctypes.TransactionInfo, error) { - bestSubmissionData := k.btccKeeper.GetSubmissionData(ctx, *sk) - if bestSubmissionData == nil { - return nil, fmt.Errorf("the best submission key for epoch %d has no submission data", bestSubmissionData.Epoch) - } - return bestSubmissionData.TxsInfo, nil -} - -// proveFinalizedChainInfo generates proofs that a chainInfo has been finalised by the given epoch with epochInfo -// It includes proofTxInBlock, proofHeaderInEpoch, proofEpochSealed and proofEpochSubmitted -// The proofs can be verified by a verifier with access to a BTC and Babylon light client -// CONTRACT: this is only a private helper function for simplifying the implementation of RPC calls -func (k Keeper) proveFinalizedChainInfo( - ctx context.Context, - chainInfo *types.ChainInfo, - epochInfo *epochingtypes.Epoch, - bestSubmissionKey *btcctypes.SubmissionKey, -) (*types.ProofFinalizedChainInfo, error) { - var ( - err error - proof = &types.ProofFinalizedChainInfo{} - ) - - // Proof that the CZ header is timestamped in epoch - proof.ProofCzHeaderInEpoch, err = k.ProveCZHeaderInEpoch(ctx, chainInfo.LatestHeader, epochInfo) - if err != nil { - return nil, err - } - - // proof that the epoch is sealed - proof.ProofEpochSealed, err = k.ProveEpochSealed(ctx, epochInfo.EpochNumber) - if err != nil { - return nil, err - } - - // proof that the epoch's 
checkpoint is submitted to BTC - // i.e., the two `TransactionInfo`s for the checkpoint - proof.ProofEpochSubmitted, err = k.ProveEpochSubmitted(ctx, bestSubmissionKey) - if err != nil { - // The only error in ProveEpochSubmitted is the nil bestSubmission. - // Since the epoch w.r.t. the bestSubmissionKey is finalised, this - // can only be a programming error, so we should panic here. - panic(err) - } - - return proof, nil -} diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go deleted file mode 100644 index 97ef52fb..00000000 --- a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package keeper_test - -import ( - "encoding/hex" - "math/rand" - "testing" - - "github.com/boljen/go-bitmap" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - - "github.com/babylonlabs-io/babylon/crypto/bls12381" - "github.com/babylonlabs-io/babylon/testutil/datagen" - testhelper "github.com/babylonlabs-io/babylon/testutil/helper" - testkeeper "github.com/babylonlabs-io/babylon/testutil/keeper" - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -func FuzzProofCZHeaderInEpoch(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - h := testhelper.NewHelper(t) - ek := h.App.EpochingKeeper - zck := h.App.ZoneConciergeKeeper - var err error - - // chain is at height 1 thus epoch 1 - - // enter the 1st block of epoch 2 - epochInterval := ek.GetParams(h.Ctx).EpochInterval - for j := 0; j < int(epochInterval)-1; j++ { - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - - // handle a random header from a random consumer chain - consumerID := datagen.GenRandomHexStr(r, 10) - height := datagen.RandomInt(r, 100) + 1 - ibctmHeader := datagen.GenRandomIBCTMHeader(r, height) - headerInfo := datagen.NewZCHeaderInfo(ibctmHeader, consumerID) - zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false) - - // ensure the header is successfully inserted - indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, height) - h.NoError(err) - - // enter the 1st block of the next epoch - for j := 0; j < int(epochInterval); j++ { - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - - epochWithHeader, err := ek.GetHistoricalEpoch(h.Ctx, indexedHeader.BabylonEpoch) - h.NoError(err) - - // generate inclusion proof - proof, err := zck.ProveCZHeaderInEpoch(h.Ctx, indexedHeader, epochWithHeader) - h.NoError(err) - - // verify the inclusion proof - err = zctypes.VerifyCZHeaderInEpoch(indexedHeader, epochWithHeader, proof) - h.NoError(err) - }) -} - -func signBLSWithBitmap(blsSKs []bls12381.PrivateKey, bm bitmap.Bitmap, msg []byte) (bls12381.Signature, error) { - sigs := []bls12381.Signature{} - for i := 0; i < len(blsSKs); i++ { - if bitmap.Get(bm, i) { - sig := bls12381.Sign(blsSKs[i], msg) - sigs = append(sigs, sig) - } - } - return bls12381.AggrSigList(sigs) -} - -// FuzzProofEpochSealed fuzz tests the prover and verifier of ProofEpochSealed -// Process: -// 1. Generate a random epoch that has a legitimate-looking SealerHeader -// 2. Generate a random validator set with BLS PKs -// 3. 
Generate a BLS multisig with >2/3 random validators of the validator set -// 4. Generate a checkpoint based on the above validator subset and the above sealer header -// 5. Execute ProveEpochSealed where the mocked checkpointing keeper produces the above validator set -// 6. Execute VerifyEpochSealed with above epoch, checkpoint and proof, and assert the outcome to be true -// -// Tested property: proof is valid only when -// - BLS sig in proof is valid -func FuzzProofEpochSealed_BLSSig(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // generate a random epoch - epoch := datagen.GenRandomEpoch(r) - - // generate a random validator set with 100 validators - numVals := 100 - valSet, blsSKs := datagen.GenerateValidatorSetWithBLSPrivKeys(numVals) - - // sample a validator subset, which may or may not reach a quorum - bm, numSubSet := datagen.GenRandomBitmap(r) - _, subsetPower, err := valSet.FindSubsetWithPowerSum(bm) - require.NoError(t, err) - - // construct the rawCkpt - // Note that the BlsMultiSig will be generated and assigned later - blockHash := checkpointingtypes.BlockHash(epoch.SealerBlockHash) - rawCkpt := &checkpointingtypes.RawCheckpoint{ - EpochNum: epoch.EpochNumber, - BlockHash: &blockHash, - Bitmap: bm, - BlsMultiSig: nil, - } - - // let the subset generate a BLS multisig over sealer header's app_hash - multiSig, err := signBLSWithBitmap(blsSKs, bm, rawCkpt.SignedMsg()) - require.NoError(t, err) - // assign multiSig to rawCkpt - rawCkpt.BlsMultiSig = &multiSig - - // mock checkpointing keeper that produces the expected validator set - checkpointingKeeper := zctypes.NewMockCheckpointingKeeper(ctrl) - checkpointingKeeper.EXPECT().GetBLSPubKeySet(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return(valSet.ValSet, nil).AnyTimes() - // mock epoching keeper - epochingKeeper := zctypes.NewMockEpochingKeeper(ctrl) - epochingKeeper.EXPECT().GetEpoch(gomock.Any()).Return(epoch).AnyTimes() - epochingKeeper.EXPECT().GetHistoricalEpoch(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return(epoch, nil).AnyTimes() - // create zcKeeper and ctx - zcKeeper, ctx := testkeeper.ZoneConciergeKeeper(t, nil, checkpointingKeeper, nil, epochingKeeper) - - // prove - proof, err := zcKeeper.ProveEpochSealed(ctx, epoch.EpochNumber) - require.NoError(t, err) - // verify - err = zctypes.VerifyEpochSealed(epoch, rawCkpt, proof) - - if subsetPower*3 <= valSet.GetTotalPower()*2 { // BLS sig does not reach a quorum - require.LessOrEqual(t, numSubSet*3, numVals*2) - require.Error(t, err) - require.NotErrorIs(t, err, zctypes.ErrInvalidMerkleProof) - } else { // BLS sig has a valid quorum - require.Greater(t, numSubSet*3, numVals*2) - require.Error(t, err) - require.ErrorIs(t, err, zctypes.ErrInvalidMerkleProof) - } - }) -} - -func FuzzProofEpochSealed_Epoch(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - h := testhelper.NewHelper(t) - ek := h.App.EpochingKeeper - zck := h.App.ZoneConciergeKeeper - var err error - - // chain is at height 1 - - // enter the 1st block of a random epoch - epochInterval := ek.GetParams(h.Ctx).EpochInterval - newEpochs := datagen.RandomInt(r, 10) + 2 - for i := 0; i < int(newEpochs); i++ { - for j := 0; j < int(epochInterval); j++ { - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - } - - // prove the inclusion of last epoch - 
lastEpochNumber := ek.GetEpoch(h.Ctx).EpochNumber - 1 - h.NoError(err) - lastEpoch, err := ek.GetHistoricalEpoch(h.Ctx, lastEpochNumber) - h.NoError(err) - proof, err := zck.ProveEpochInfo(lastEpoch) - h.NoError(err) - - // verify inclusion proof - err = zctypes.VerifyEpochInfo(lastEpoch, proof) - h.NoError(err) - }) -} - -func FuzzProofEpochSealed_ValSet(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - // generate the validator set with 10 validators as genesis - genesisValSet, privSigner, err := datagen.GenesisValidatorSetWithPrivSigner(10) - require.NoError(t, err) - h := testhelper.NewHelperWithValSet(t, genesisValSet, privSigner) - ek := h.App.EpochingKeeper - ck := h.App.CheckpointingKeeper - zck := h.App.ZoneConciergeKeeper - - // chain is at height 1 - // enter the 1st block of a random epoch - epochInterval := ek.GetParams(h.Ctx).EpochInterval - newEpochs := datagen.RandomInt(r, 10) + 2 - for i := 0; i < int(newEpochs); i++ { - for j := 0; j < int(epochInterval); j++ { - _, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - } - - // seal the last epoch at the 2nd block of the current epoch - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - - // prove the inclusion of last epoch - lastEpochNumber := ek.GetEpoch(h.Ctx).EpochNumber - 1 - h.NoError(err) - lastEpoch, err := ek.GetHistoricalEpoch(h.Ctx, lastEpochNumber) - h.NoError(err) - lastEpochValSet := ck.GetValidatorBlsKeySet(h.Ctx, lastEpochNumber) - proof, err := zck.ProveValSet(lastEpoch) - h.NoError(err) - - // verify inclusion proof - err = zctypes.VerifyValSet(lastEpoch, lastEpochValSet, proof) - h.NoError(err) - }) -} - -func FuzzProofEpochSubmitted(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - - // generate random epoch, random rawBtcCkpt and random rawCkpt - epoch := datagen.GenRandomEpoch(r) - rawBtcCkpt := datagen.GetRandomRawBtcCheckpoint(r) - rawBtcCkpt.Epoch = epoch.EpochNumber - rawCkpt, err := checkpointingtypes.FromBTCCkptToRawCkpt(rawBtcCkpt) - require.NoError(t, err) - - // encode ckpt to BTC txs in BTC blocks - testRawCkptData := datagen.EncodeRawCkptToTestData(rawBtcCkpt) - idxs := []uint64{datagen.RandomInt(r, 5) + 1, datagen.RandomInt(r, 5) + 1} - offsets := []uint64{datagen.RandomInt(r, 5) + 1, datagen.RandomInt(r, 5) + 1} - btcBlocks := []*datagen.BlockCreationResult{ - datagen.CreateBlock(r, 1, uint32(idxs[0]+offsets[0]), uint32(idxs[0]), testRawCkptData.FirstPart), - datagen.CreateBlock(r, 2, uint32(idxs[1]+offsets[1]), uint32(idxs[1]), testRawCkptData.SecondPart), - } - // create MsgInsertBtcSpvProof for the rawCkpt - msgInsertBtcSpvProof := datagen.GenerateMessageWithRandomSubmitter([]*datagen.BlockCreationResult{btcBlocks[0], btcBlocks[1]}) - - // get headers for verification - btcHeaders := []*wire.BlockHeader{ - btcBlocks[0].HeaderBytes.ToBlockHeader(), - btcBlocks[1].HeaderBytes.ToBlockHeader(), - } - - // get 2 tx info for the ckpt parts - txsInfo := []*btcctypes.TransactionInfo{ - { - Key: &btcctypes.TransactionKey{Index: uint32(idxs[0]), Hash: btcBlocks[0].HeaderBytes.Hash()}, - Transaction: msgInsertBtcSpvProof.Proofs[0].BtcTransaction, - Proof: msgInsertBtcSpvProof.Proofs[0].MerkleNodes, - }, - { - Key: &btcctypes.TransactionKey{Index: uint32(idxs[1]), Hash: btcBlocks[1].HeaderBytes.Hash()}, - Transaction: msgInsertBtcSpvProof.Proofs[1].BtcTransaction, - Proof: 
msgInsertBtcSpvProof.Proofs[1].MerkleNodes, - }, - } - - // net param, babylonTag - powLimit := chaincfg.SimNetParams.PowLimit - babylonTag := btcctypes.DefaultCheckpointTag - tagAsBytes, _ := hex.DecodeString(babylonTag) - - // verify - err = zctypes.VerifyEpochSubmitted(rawCkpt, txsInfo, btcHeaders, powLimit, tagAsBytes) - require.NoError(t, err) - }) -} diff --git a/x/zoneconcierge/keeper/query_kvstore.go b/x/zoneconcierge/keeper/query_kvstore.go deleted file mode 100644 index 6f2568f4..00000000 --- a/x/zoneconcierge/keeper/query_kvstore.go +++ /dev/null @@ -1,40 +0,0 @@ -package keeper - -import ( - "fmt" - - storetypes "cosmossdk.io/store/types" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" -) - -// QueryStore queries a KV pair in the KVStore, where -// - moduleStoreKey is the store key of a module, e.g., zctypes.StoreKey -// - key is the key of the queried KV pair, including the prefix, e.g., zctypes.EpochChainInfoKey || consumerID in the chain info store -// and returns -// - key of this KV pair -// - value of this KV pair -// - Merkle proof of this KV pair -// - error -// (adapted from https://github.com/cosmos/cosmos-sdk/blob/v0.46.6/baseapp/abci.go#L774-L795) -func (k Keeper) QueryStore(moduleStoreKey string, key []byte, queryHeight int64) ([]byte, []byte, *cmtcrypto.ProofOps, error) { - // construct the query path for ABCI query - // since we are querying the DB directly, the path will not need prefix "/store" as done in ABCIQuery - // Instead, it will be formed as "//key", e.g., "/epoching/key" - path := fmt.Sprintf("/%s/key", moduleStoreKey) - - // query the KV with Merkle proof - resp, err := k.storeQuerier.Query(&storetypes.RequestQuery{ - Path: path, - Data: key, - Height: queryHeight - 1, // NOTE: the inclusion proof corresponds to the NEXT header - Prove: true, - }) - if err != nil { - return nil, nil, nil, err - } - if resp.Code != 0 { - return nil, nil, nil, fmt.Errorf("query (with path %s) failed with response: %v", path, resp) - } - - return resp.Key, resp.Value, resp.ProofOps, nil -} diff --git a/x/zoneconcierge/module.go b/x/zoneconcierge/module.go deleted file mode 100644 index 5fcc64a1..00000000 --- a/x/zoneconcierge/module.go +++ /dev/null @@ -1,163 +0,0 @@ -package zoneconcierge - -import ( - "context" - "cosmossdk.io/core/appmodule" - "encoding/json" - "fmt" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/spf13/cobra" - - abci "github.com/cometbft/cometbft/abci/types" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/client/cli" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - porttypes "github.com/cosmos/ibc-go/v8/modules/core/05-port/types" -) - -var ( - _ appmodule.AppModule = AppModule{} - _ appmodule.HasBeginBlocker = AppModule{} - _ module.HasABCIEndBlock = AppModule{} - _ module.AppModuleBasic = AppModuleBasic{} - _ porttypes.IBCModule = IBCModule{} -) - -// ---------------------------------------------------------------------------- -// AppModuleBasic -// ---------------------------------------------------------------------------- - -// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. 
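// A minimal standalone sketch, not taken from the Babylon codebase, of how the QueryStore helper above
// shapes its request: the path is "/<module store key>/key" (no "/store" prefix, since the DB is queried
// directly), and the height is the target height minus one because the resulting proof is committed to
// the app hash of the NEXT block header. storeQuery is a hypothetical stand-in for storetypes.RequestQuery.
package main

import "fmt"

type storeQuery struct {
	Path   string
	Data   []byte
	Height int64
	Prove  bool
}

func newStoreQuery(moduleStoreKey string, key []byte, queryHeight int64) storeQuery {
	return storeQuery{
		Path:   fmt.Sprintf("/%s/key", moduleStoreKey),
		Data:   key,
		Height: queryHeight - 1, // the inclusion proof corresponds to the NEXT header
		Prove:  true,
	}
}

func main() {
	q := newStoreQuery("epoching", []byte{0x11}, 1200)
	fmt.Println(q.Path, q.Height) // /epoching/key 1199
}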
-type AppModuleBasic struct { - cdc codec.BinaryCodec -} - -func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { - return AppModuleBasic{cdc: cdc} -} - -// Name returns the name of the module as a string -func (AppModuleBasic) Name() string { - return types.ModuleName -} - -// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore -func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - types.RegisterCodec(cdc) -} - -// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message -func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { - types.RegisterInterfaces(reg) -} - -// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState need to be defined by the module developer and is primarily used for testing -func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { - return cdc.MustMarshalJSON(types.DefaultGenesis()) -} - -// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form -func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { - var genState types.GenesisState - if err := cdc.UnmarshalJSON(bz, &genState); err != nil { - return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) - } - return genState.Validate() -} - -// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module -func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { - types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) //nolint:errcheck // either we propogate the error up the stack, or don't check here. -} - -// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module -func (a AppModuleBasic) GetTxCmd() *cobra.Command { - return cli.GetTxCmd() -} - -// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module -func (AppModuleBasic) GetQueryCmd() *cobra.Command { - return cli.GetQueryCmd(types.StoreKey) -} - -// ---------------------------------------------------------------------------- -// AppModule -// ---------------------------------------------------------------------------- - -// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement -type AppModule struct { - AppModuleBasic - - keeper keeper.Keeper - accountKeeper types.AccountKeeper - bankKeeper types.BankKeeper -} - -func NewAppModule( - cdc codec.Codec, - keeper keeper.Keeper, - accountKeeper types.AccountKeeper, - bankKeeper types.BankKeeper, -) AppModule { - return AppModule{ - AppModuleBasic: NewAppModuleBasic(cdc), - keeper: keeper, - accountKeeper: accountKeeper, - bankKeeper: bankKeeper, - } -} - -// Deprecated: use RegisterServices -func (AppModule) QuerierRoute() string { return types.RouterKey } - -// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries -func (am AppModule) RegisterServices(cfg module.Configurator) { - types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) - types.RegisterQueryServer(cfg.QueryServer(), am.keeper) -} - -// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) -func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} - -// InitGenesis performs the module's genesis initialization. It returns no validator updates. -func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) { - var genState types.GenesisState - // Initialize global index to index in genesis state - cdc.MustUnmarshalJSON(gs, &genState) - - InitGenesis(ctx, am.keeper, genState) -} - -// ExportGenesis returns the module's exported genesis state as raw JSON bytes. -func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { - genState := ExportGenesis(ctx, am.keeper) - return cdc.MustMarshalJSON(genState) -} - -// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1 -func (AppModule) ConsensusVersion() uint64 { return 1 } - -// BeginBlock contains the logic that is automatically triggered at the beginning of each block -func (am AppModule) BeginBlock(ctx context.Context) error { - return BeginBlocker(ctx, am.keeper) -} - -// EndBlock contains the logic that is automatically triggered at the end of each block -func (am AppModule) EndBlock(ctx context.Context) ([]abci.ValidatorUpdate, error) { - return EndBlocker(ctx, am.keeper) -} - -// IsOnePerModuleType implements the depinject.OnePerModuleType interface. -func (am AppModule) IsOnePerModuleType() { // marker -} - -// IsAppModule implements the appmodule.AppModule interface. 
-func (am AppModule) IsAppModule() { // marker -} diff --git a/x/zoneconcierge/module_ibc.go b/x/zoneconcierge/module_ibc.go deleted file mode 100644 index 1484bc32..00000000 --- a/x/zoneconcierge/module_ibc.go +++ /dev/null @@ -1,228 +0,0 @@ -package zoneconcierge - -import ( - errorsmod "cosmossdk.io/errors" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/keeper" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - porttypes "github.com/cosmos/ibc-go/v8/modules/core/05-port/types" - host "github.com/cosmos/ibc-go/v8/modules/core/24-host" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" -) - -type IBCModule struct { - keeper keeper.Keeper -} - -func NewIBCModule(k keeper.Keeper) IBCModule { - return IBCModule{ - keeper: k, - } -} - -// OnChanOpenInit implements the IBCModule interface -func (im IBCModule) OnChanOpenInit( - ctx sdk.Context, - order channeltypes.Order, - connectionHops []string, - portID string, - channelID string, - chanCap *capabilitytypes.Capability, - counterparty channeltypes.Counterparty, - version string, -) (string, error) { - // the IBC channel has to be ordered - if order != channeltypes.ORDERED { - return "", errorsmod.Wrapf(channeltypes.ErrInvalidChannelOrdering, "expected %s channel, got %s ", channeltypes.ORDERED, order) - } - - // Require portID to be the one that ZoneConcierge is bound to - boundPort := im.keeper.GetPort(ctx) - if boundPort != portID { - return "", errorsmod.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", portID, boundPort) - } - - // ensure consistency of the protocol version - if version != types.Version { - return "", errorsmod.Wrapf(types.ErrInvalidVersion, "got %s, expected %s", version, types.Version) - } - - // Claim channel capability passed back by IBC module - if err := im.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { - return "", err - } - - return version, nil -} - -// OnChanOpenTry implements the IBCModule interface -func (im IBCModule) OnChanOpenTry( - ctx sdk.Context, - order channeltypes.Order, - connectionHops []string, - portID, - channelID string, - chanCap *capabilitytypes.Capability, - counterparty channeltypes.Counterparty, - counterpartyVersion string, -) (string, error) { - // the IBC channel has to be ordered - if order != channeltypes.ORDERED { - return "", errorsmod.Wrapf(channeltypes.ErrInvalidChannelOrdering, "expected %s channel, got %s ", channeltypes.ORDERED, order) - } - - // Require portID to be the one that ZoneConcierge is bound to - boundPort := im.keeper.GetPort(ctx) - if boundPort != portID { - return "", errorsmod.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", portID, boundPort) - } - - // ensure consistency of the protocol version - if counterpartyVersion != types.Version { - return "", errorsmod.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: got: %s, expected %s", counterpartyVersion, types.Version) - } - - // Module may have already claimed capability in OnChanOpenInit in the case of crossing hellos - // (ie chainA and chainB both call ChanOpenInit before one of them calls ChanOpenTry) - // If module can already authenticate the capability then module already owns it so we don't need to claim - // Otherwise, module does not have 
channel capability and we must claim it from IBC - if !im.keeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { - // Only claim channel capability passed back by IBC module if we do not already own it - if err := im.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { - return "", err - } - } - - return types.Version, nil -} - -// OnChanOpenAck implements the IBCModule interface -func (im IBCModule) OnChanOpenAck( - ctx sdk.Context, - portID, - channelID string, - _, - counterpartyVersion string, -) error { - // check version consistency - if counterpartyVersion != types.Version { - return errorsmod.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: %s, expected %s", counterpartyVersion, types.Version) - } - - return nil -} - -// OnChanOpenConfirm implements the IBCModule interface -func (im IBCModule) OnChanOpenConfirm( - ctx sdk.Context, - portID, - channelID string, -) error { - return nil -} - -// OnChanCloseInit implements the IBCModule interface -func (im IBCModule) OnChanCloseInit( - ctx sdk.Context, - portID, - channelID string, -) error { - // Disallow user-initiated channel closing for channels - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel") -} - -// OnChanCloseConfirm implements the IBCModule interface -func (im IBCModule) OnChanCloseConfirm( - ctx sdk.Context, - portID, - channelID string, -) error { - return nil -} - -// OnRecvPacket implements the IBCModule interface -func (im IBCModule) OnRecvPacket( - ctx sdk.Context, - modulePacket channeltypes.Packet, - relayer sdk.AccAddress, -) ibcexported.Acknowledgement { - // Babylon is supposed to not take any IBC packet - // NOTE: acknowledgement will be written synchronously during IBC handler execution. 
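// A minimal standalone sketch, not taken from the Babylon codebase, of the handshake checks applied by
// OnChanOpenInit/OnChanOpenTry above before a channel is accepted: the channel must be ordered, the port
// must be the one ZoneConcierge bound, and the version string must match exactly. The literal values in
// main are hypothetical stand-ins for channeltypes.ORDERED, the bound port, and types.Version.
package main

import "fmt"

func validateHandshake(order, portID, version, boundPort, expectedVersion string) error {
	if order != "ORDER_ORDERED" {
		return fmt.Errorf("expected an ordered channel, got %s", order)
	}
	if portID != boundPort {
		return fmt.Errorf("invalid port %s, expected %s", portID, boundPort)
	}
	if version != expectedVersion {
		return fmt.Errorf("invalid version %s, expected %s", version, expectedVersion)
	}
	return nil
}

func main() {
	err := validateHandshake("ORDER_ORDERED", "zoneconcierge", "zoneconcierge-1", "zoneconcierge", "zoneconcierge-1")
	fmt.Println(err) // <nil>
}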
-	return channeltypes.NewErrorAcknowledgement(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "Babylon is supposed to not take any IBC packet"))
-}
-
-// OnAcknowledgementPacket implements the IBCModule interface
-func (im IBCModule) OnAcknowledgementPacket(
-	ctx sdk.Context,
-	modulePacket channeltypes.Packet,
-	acknowledgement []byte,
-	relayer sdk.AccAddress,
-) error {
-	var ack channeltypes.Acknowledgement
-	// `x/wasm` uses both protobuf and json to encode acknowledgements, so we need to try both here
-	// - for acknowledgement messages with errors defined in `x/wasm`, it uses json
-	// - for all other acknowledgement messages, it uses protobuf
-	if errProto := types.ModuleCdc.Unmarshal(acknowledgement, &ack); errProto != nil {
-		im.keeper.Logger(ctx).Error("cannot unmarshal packet acknowledgement with protobuf", "error", errProto)
-		if errJson := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); errJson != nil {
-			im.keeper.Logger(ctx).Error("cannot unmarshal packet acknowledgement with json", "error", errJson)
-			return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal packet acknowledgement with protobuf (error: %v) or json (error: %v)", errProto, errJson)
-		}
-	}
-
-	// // TODO (Babylon): Dispatch and process packet
-	// switch packet := modulePacketData.Packet.(type) {
-	// default:
-	// 	errMsg := fmt.Sprintf("unrecognized %s packet type: %T", types.ModuleName, packet)
-	// 	return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, errMsg)
-	// }
-
-	switch resp := ack.Response.(type) {
-	case *channeltypes.Acknowledgement_Result:
-		im.keeper.Logger(ctx).Info("received an Acknowledgement message", "result", string(resp.Result))
-		ctx.EventManager().EmitEvent(
-			sdk.NewEvent(
-				types.EventTypeAck,
-				sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
-				sdk.NewAttribute(types.AttributeKeyAckSuccess, string(resp.Result)),
-			),
-		)
-	case *channeltypes.Acknowledgement_Error:
-		im.keeper.Logger(ctx).Error("received an Acknowledgement error message", "error", resp.Error)
-		ctx.EventManager().EmitEvent(
-			sdk.NewEvent(
-				types.EventTypeAck,
-				sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
-				sdk.NewAttribute(types.AttributeKeyAckError, resp.Error),
-			),
-		)
-	}
-
-	return nil
-}
-
-// OnTimeoutPacket implements the IBCModule interface
-func (im IBCModule) OnTimeoutPacket(
-	ctx sdk.Context,
-	modulePacket channeltypes.Packet,
-	relayer sdk.AccAddress,
-) error {
-	var modulePacketData types.ZoneconciergePacketData
-	if err := modulePacketData.Unmarshal(modulePacket.GetData()); err != nil {
-		return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal packet data: %s", err.Error())
-	}
-
-	// // TODO (Babylon): Dispatch and process packet
-	// switch packet := modulePacketData.Packet.(type) {
-	// default:
-	// 	errMsg := fmt.Sprintf("unrecognized %s packet type: %T", types.ModuleName, packet)
-	// 	return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, errMsg)
-	// }
-
-	// TODO: close channel upon timeout
-
-	return nil
-}
diff --git a/x/zoneconcierge/types/btc_timestamp.go b/x/zoneconcierge/types/btc_timestamp.go
deleted file mode 100644
index de88561d..00000000
--- a/x/zoneconcierge/types/btc_timestamp.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package types
-
-import (
-	"context"
-	"fmt"
-	"math/big"
-
-	errorsmod "cosmossdk.io/errors"
-	"github.com/btcsuite/btcd/wire"
-	cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
-	sdk "github.com/cosmos/cosmos-sdk/types"
-
-	txformat "github.com/babylonlabs-io/babylon/btctxformatter"
-	"github.com/babylonlabs-io/babylon/crypto/bls12381"
-	bbn "github.com/babylonlabs-io/babylon/types"
-	btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types"
-	btclckeeper "github.com/babylonlabs-io/babylon/x/btclightclient/keeper"
-	checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types"
-	epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types"
-)
-
-func GetCZHeaderKey(consumerID string, height uint64) []byte {
-	key := CanonicalChainKey
-	key = append(key, []byte(consumerID)...)
-	key = append(key, sdk.Uint64ToBigEndian(height)...)
-	return key
-}
-
-func GetEpochInfoKey(epochNumber uint64) []byte {
-	epochInfoKey := epochingtypes.EpochInfoKey
-	epochInfoKey = append(epochInfoKey, sdk.Uint64ToBigEndian(epochNumber)...)
-	return epochInfoKey
-}
-
-func GetValSetKey(epochNumber uint64) []byte {
-	valSetKey := checkpointingtypes.ValidatorBlsKeySetPrefix
-	valSetKey = append(valSetKey, sdk.Uint64ToBigEndian(epochNumber)...)
-	return valSetKey
-}
-
-func VerifyEpochInfo(epoch *epochingtypes.Epoch, proof *cmtcrypto.ProofOps) error {
-	// get the Merkle root, i.e., the AppHash of the sealer header
-	root := epoch.SealerAppHash
-
-	// Ensure the epoch metadata is committed to the app_hash of the sealer header
-	// NOTE: the proof is generated when sealer header is generated. At that time
-	// sealer header hash is not given to epoch metadata. Thus we need to clear the
-	// sealer header hash when verifying the proof.
-	epoch.SealerAppHash = []byte{}
-	epochBytes, err := epoch.Marshal()
-	if err != nil {
-		return err
-	}
-	epoch.SealerAppHash = root
-	if err := VerifyStore(root, epochingtypes.StoreKey, GetEpochInfoKey(epoch.EpochNumber), epochBytes, proof); err != nil {
-		return errorsmod.Wrapf(ErrInvalidMerkleProof, "invalid inclusion proof for epoch metadata: %v", err)
-	}
-
-	return nil
-}
-
-func VerifyValSet(epoch *epochingtypes.Epoch, valSet *checkpointingtypes.ValidatorWithBlsKeySet, proof *cmtcrypto.ProofOps) error {
-	valSetBytes, err := valSet.Marshal()
-	if err != nil {
-		return err
-	}
-	if err := VerifyStore(epoch.SealerAppHash, checkpointingtypes.StoreKey, GetValSetKey(epoch.EpochNumber), valSetBytes, proof); err != nil {
-		return errorsmod.Wrapf(ErrInvalidMerkleProof, "invalid inclusion proof for validator set: %v", err)
-	}
-
-	return nil
-}
-
-// VerifyEpochSealed verifies that the given `epoch` is sealed by the `rawCkpt` by using the given `proof`
-// The verification rules include:
-// - basic sanity checks
-// - The raw checkpoint's BlockHash is same as the sealer_block_hash of the sealed epoch
-// - More than 2/3 (in voting power) validators in the validator set of this epoch have signed sealer_block_hash of the sealed epoch
-// - The epoch metadata is committed to the sealer_app_hash of the sealed epoch
-// - The validator set is committed to the sealer_app_hash of the sealed epoch
-func VerifyEpochSealed(epoch *epochingtypes.Epoch, rawCkpt *checkpointingtypes.RawCheckpoint, proof *ProofEpochSealed) error {
-	// nil check
-	if epoch == nil {
-		return fmt.Errorf("epoch is nil")
-	} else if rawCkpt == nil {
-		return fmt.Errorf("rawCkpt is nil")
-	} else if proof == nil {
-		return fmt.Errorf("proof is nil")
-	}
-
-	// sanity check
-	if err := epoch.ValidateBasic(); err != nil {
-		return err
-	} else if err := rawCkpt.ValidateBasic(); err != nil {
-		return err
-	} else if err = proof.ValidateBasic(); err != nil {
-		return err
-	}
-
-	// ensure epoch number is same in epoch and rawCkpt
-	if epoch.EpochNumber != rawCkpt.EpochNum {
-		return fmt.Errorf("epoch.EpochNumber (%d) is not equal to rawCkpt.EpochNum (%d)", epoch.EpochNumber, rawCkpt.EpochNum)
-	}
-
-	// ensure the raw checkpoint's block_hash is same as the sealer_block_hash of the sealed epoch
-	// NOTE: since this proof is assembled by a Babylon node who has verified the checkpoint,
-	// the two blockhash values should always be the same, otherwise this Babylon node is malicious.
-	// This is different from the checkpoint verification rules in checkpointing,
-	// where a checkpoint with valid BLS multisig but different blockhashes signals a dishonest majority equivocation.
-	blockHashInCkpt := rawCkpt.BlockHash
-	blockHashInSealerHeader := checkpointingtypes.BlockHash(epoch.SealerBlockHash)
-	if !blockHashInCkpt.Equal(blockHashInSealerHeader) {
-		return fmt.Errorf("BlockHash is not same in rawCkpt (%s) and epoch's SealerHeader (%s)", blockHashInCkpt.String(), blockHashInSealerHeader.String())
-	}
-
-	/*
-		Ensure more than 2/3 (in voting power) validators of this epoch have signed (epoch_num || block_hash) in the raw checkpoint
-	*/
-	valSet := &checkpointingtypes.ValidatorWithBlsKeySet{ValSet: proof.ValidatorSet}
-	// filter validator set that contributes to the signature
-	signerSet, signerSetPower, err := valSet.FindSubsetWithPowerSum(rawCkpt.Bitmap)
-	if err != nil {
-		return err
-	}
-	// ensure the signerSet has > 2/3 voting power
-	if signerSetPower*3 <= valSet.GetTotalPower()*2 {
-		return checkpointingtypes.ErrInsufficientVotingPower
-	}
-	// verify BLS multisig
-	signedMsgBytes := rawCkpt.SignedMsg()
-	ok, err := bls12381.VerifyMultiSig(*rawCkpt.BlsMultiSig, signerSet.GetBLSKeySet(), signedMsgBytes)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		return fmt.Errorf("BLS signature does not match the public key")
-	}
-
-	// Ensure the epoch metadata is committed to the app_hash of the sealer header
-	if err := VerifyEpochInfo(epoch, proof.ProofEpochInfo); err != nil {
-		return err
-	}
-
-	// Ensure the validator set is committed to the app_hash of the sealer header
-	if err := VerifyValSet(epoch, valSet, proof.ProofEpochValSet); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func VerifyCZHeaderInEpoch(header *IndexedHeader, epoch *epochingtypes.Epoch, proof *cmtcrypto.ProofOps) error {
-	// nil check
-	if header == nil {
-		return fmt.Errorf("header is nil")
-	} else if epoch == nil {
-		return fmt.Errorf("epoch is nil")
-	} else if proof == nil {
-		return fmt.Errorf("proof is nil")
-	}
-
-	// sanity check
-	if err := header.ValidateBasic(); err != nil {
-		return err
-	} else if err := epoch.ValidateBasic(); err != nil {
-		return err
-	}
-
-	// ensure epoch number is same in epoch and CZ header
-	if epoch.EpochNumber != header.BabylonEpoch {
-		return fmt.Errorf("epoch.EpochNumber (%d) is not equal to header.BabylonEpoch (%d)", epoch.EpochNumber, header.BabylonEpoch)
-	}
-
-	// get the Merkle root, i.e., the AppHash of the sealer header
-	root := epoch.SealerAppHash
-
-	// Ensure the header is committed to the app_hash of the sealer header
-	headerBytes, err := header.Marshal()
-	if err != nil {
-		return err
-	}
-
-	if err := VerifyStore(root, StoreKey, GetCZHeaderKey(header.ConsumerId, header.Height), headerBytes, proof); err != nil {
-		return errorsmod.Wrapf(ErrInvalidMerkleProof, "invalid inclusion proof for CZ header: %v", err)
-	}
-
-	return nil
-}
-
-// VerifyEpochSubmitted verifies whether an epoch's checkpoint has been included in BTC or not
-// verifications include:
-// - basic sanity checks
-// - Merkle proofs in txsInfo are valid
-// - the
raw ckpt decoded from txsInfo is same as the expected rawCkpt -func VerifyEpochSubmitted(rawCkpt *checkpointingtypes.RawCheckpoint, txsInfo []*btcctypes.TransactionInfo, btcHeaders []*wire.BlockHeader, powLimit *big.Int, babylonTag txformat.BabylonTag) error { - // basic sanity check - if rawCkpt == nil { - return fmt.Errorf("rawCkpt is nil") - } else if len(txsInfo) != txformat.NumberOfParts { - return fmt.Errorf("txsInfo contains %d parts rather than %d", len(txsInfo), txformat.NumberOfParts) - } else if len(btcHeaders) != txformat.NumberOfParts { - return fmt.Errorf("btcHeaders contains %d parts rather than %d", len(btcHeaders), txformat.NumberOfParts) - } - - // sanity check of each tx info - for _, txInfo := range txsInfo { - if err := txInfo.ValidateBasic(); err != nil { - return err - } - } - - // verify Merkle proofs for each tx info - parsedProofs := []*btcctypes.ParsedProof{} - for i, txInfo := range txsInfo { - btcHeaderBytes := bbn.NewBTCHeaderBytesFromBlockHeader(btcHeaders[i]) - parsedProof, err := btcctypes.ParseProof( - txInfo.Transaction, - txInfo.Key.Index, - txInfo.Proof, - &btcHeaderBytes, - powLimit, - ) - if err != nil { - return err - } - parsedProofs = append(parsedProofs, parsedProof) - } - - // decode parsedProof to checkpoint data - checkpointData := [][]byte{} - for i, proof := range parsedProofs { - data, err := txformat.GetCheckpointData( - babylonTag, - txformat.CurrentVersion, - uint8(i), - proof.OpReturnData, - ) - - if err != nil { - return err - } - checkpointData = append(checkpointData, data) - } - rawCkptData, err := txformat.ConnectParts(txformat.CurrentVersion, checkpointData[0], checkpointData[1]) - if err != nil { - return err - } - decodedRawCkpt, err := checkpointingtypes.FromBTCCkptBytesToRawCkpt(rawCkptData) - if err != nil { - return err - } - - // check if decodedRawCkpt is same as the expected rawCkpt - if !decodedRawCkpt.Equal(rawCkpt) { - return fmt.Errorf("the decoded rawCkpt (%v) is different from the expected rawCkpt (%v)", decodedRawCkpt, rawCkpt) - } - - return nil -} - -func (ts *BTCTimestamp) Verify( - ctx context.Context, - btclcKeeper *btclckeeper.Keeper, - wValue uint64, - ckptTag txformat.BabylonTag, -) error { - // BTC net - btcNet := btclcKeeper.GetBTCNet() - - // verify and insert all BTC headers - headersBytes := []bbn.BTCHeaderBytes{} - for _, headerInfo := range ts.BtcHeaders { - headerBytes := bbn.NewBTCHeaderBytesFromBlockHeader(headerInfo.Header.ToBlockHeader()) - headersBytes = append(headersBytes, headerBytes) - } - if err := btclcKeeper.InsertHeadersWithHookAndEvents(ctx, headersBytes); err != nil { - return err - } - - // get BTC headers that include the checkpoint, and ensure at least 1 of them is w-deep - btcHeadersWithCkpt := []*wire.BlockHeader{} - wDeep := false - for _, key := range ts.BtcSubmissionKey.Key { - header := btclcKeeper.GetHeaderByHash(ctx, key.Hash) - if header == nil { - return fmt.Errorf("header corresponding to the inclusion proof is not on BTC light client") - } - btcHeadersWithCkpt = append(btcHeadersWithCkpt, header.Header.ToBlockHeader()) - - depth, err := btclcKeeper.MainChainDepth(ctx, header.Hash) - if err != nil { - return err - } - if depth >= wValue { - wDeep = true - } - } - if !wDeep { - return fmt.Errorf("checkpoint is not w-deep") - } - - // perform stateless checks that do not rely on BTC light client - return ts.VerifyStateless(btcHeadersWithCkpt, btcNet.PowLimit, ckptTag) -} - -func (ts *BTCTimestamp) VerifyStateless( - btcHeadersWithCkpt []*wire.BlockHeader, - powLimit 
*big.Int, - ckptTag txformat.BabylonTag, -) error { - // ensure raw checkpoint corresponds to the epoch - if ts.EpochInfo.EpochNumber != ts.RawCheckpoint.EpochNum { - return fmt.Errorf("epoch number in epoch metadata and raw checkpoint is not same") - } - - if len(ts.BtcSubmissionKey.Key) != txformat.NumberOfParts { - return fmt.Errorf("incorrect number of txs for a checkpoint") - } - - // verify the checkpoint txs are committed to the two headers - err := VerifyEpochSubmitted(ts.RawCheckpoint, ts.Proof.ProofEpochSubmitted, btcHeadersWithCkpt, powLimit, ckptTag) - if err != nil { - return err - } - - // verify the epoch is sealed - if err := VerifyEpochSealed(ts.EpochInfo, ts.RawCheckpoint, ts.Proof.ProofEpochSealed); err != nil { - return err - } - - // verify CZ header is committed to the epoch - if err := VerifyCZHeaderInEpoch(ts.Header, ts.EpochInfo, ts.Proof.ProofCzHeaderInEpoch); err != nil { - return err - } - - return nil -} diff --git a/x/zoneconcierge/types/btc_timestamp_test.go b/x/zoneconcierge/types/btc_timestamp_test.go deleted file mode 100644 index 4d62fe19..00000000 --- a/x/zoneconcierge/types/btc_timestamp_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package types_test - -import ( - "encoding/hex" - "math/rand" - "testing" - - "github.com/boljen/go-bitmap" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - "github.com/stretchr/testify/require" - - txformat "github.com/babylonlabs-io/babylon/btctxformatter" - "github.com/babylonlabs-io/babylon/crypto/bls12381" - "github.com/babylonlabs-io/babylon/testutil/datagen" - testhelper "github.com/babylonlabs-io/babylon/testutil/helper" - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" -) - -func signBLSWithBitmap(blsSKs []bls12381.PrivateKey, bm bitmap.Bitmap, msg []byte) (bls12381.Signature, error) { - sigs := []bls12381.Signature{} - for i := 0; i < len(blsSKs); i++ { - if bitmap.Get(bm, i) { - sig := bls12381.Sign(blsSKs[i], msg) - sigs = append(sigs, sig) - } - } - return bls12381.AggrSigList(sigs) -} - -func FuzzBTCTimestamp(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 10) - - f.Fuzz(func(t *testing.T, seed int64) { - r := rand.New(rand.NewSource(seed)) - // generate the validator set with 10 validators as genesis - genesisValSet, privSigner, err := datagen.GenesisValidatorSetWithPrivSigner(10) - require.NoError(t, err) - h := testhelper.NewHelperWithValSet(t, genesisValSet, privSigner) - ek := &h.App.EpochingKeeper - zck := h.App.ZoneConciergeKeeper - - // empty BTC timestamp - btcTs := &types.BTCTimestamp{} - btcTs.Proof = &types.ProofFinalizedChainInfo{} - - // chain is at height 1 thus epoch 1 - - /* - generate CZ header and its inclusion proof to an epoch - */ - // enter block 11, 1st block of epoch 2 - epochInterval := ek.GetParams(h.Ctx).EpochInterval - for j := 0; j < int(epochInterval)-2; j++ { - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - - // handle a random header from a random consumer chain - consumerID := datagen.GenRandomHexStr(r, 10) - height := datagen.RandomInt(r, 100) + 1 - ibctmHeader := datagen.GenRandomIBCTMHeader(r, height) - headerInfo := datagen.NewZCHeaderInfo(ibctmHeader, consumerID) - zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false) - - // ensure the header is successfully inserted - indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, 
height) - h.NoError(err) - - // enter block 21, 1st block of epoch 3 - for j := 0; j < int(epochInterval); j++ { - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - } - // seal last epoch - h.Ctx, err = h.ApplyEmptyBlockWithVoteExtension(r) - h.NoError(err) - - epochWithHeader, err := ek.GetHistoricalEpoch(h.Ctx, indexedHeader.BabylonEpoch) - h.NoError(err) - - // generate inclusion proof - proof, err := zck.ProveCZHeaderInEpoch(h.Ctx, indexedHeader, epochWithHeader) - h.NoError(err) - - btcTs.EpochInfo = epochWithHeader - btcTs.Header = indexedHeader - btcTs.Proof.ProofCzHeaderInEpoch = proof - - /* - seal the epoch and generate ProofEpochSealed - */ - // construct the rawCkpt - // Note that the BlsMultiSig will be generated and assigned later - bm := datagen.GenFullBitmap() - blockHash := checkpointingtypes.BlockHash(epochWithHeader.SealerBlockHash) - rawCkpt := &checkpointingtypes.RawCheckpoint{ - EpochNum: epochWithHeader.EpochNumber, - BlockHash: &blockHash, - Bitmap: bm, - BlsMultiSig: nil, - } - // let the subset generate a BLS multisig over sealer header's app_hash - multiSig, err := signBLSWithBitmap(h.GenValidators.GetBLSPrivKeys(), bm, rawCkpt.SignedMsg()) - require.NoError(t, err) - // assign multiSig to rawCkpt - rawCkpt.BlsMultiSig = &multiSig - - // prove - btcTs.Proof.ProofEpochSealed, err = zck.ProveEpochSealed(h.Ctx, epochWithHeader.EpochNumber) - require.NoError(t, err) - - btcTs.RawCheckpoint = rawCkpt - - /* - forge two BTC headers including the checkpoint - */ - // encode ckpt to BTC txs in BTC blocks - submitterAddr := datagen.GenRandomByteArray(r, txformat.AddressLength) - rawBTCCkpt, err := checkpointingtypes.FromRawCkptToBTCCkpt(rawCkpt, submitterAddr) - h.NoError(err) - testRawCkptData := datagen.EncodeRawCkptToTestData(rawBTCCkpt) - idxs := []uint64{datagen.RandomInt(r, 5) + 1, datagen.RandomInt(r, 5) + 1} - offsets := []uint64{datagen.RandomInt(r, 5) + 1, datagen.RandomInt(r, 5) + 1} - btcBlocks := []*datagen.BlockCreationResult{ - datagen.CreateBlock(r, 1, uint32(idxs[0]+offsets[0]), uint32(idxs[0]), testRawCkptData.FirstPart), - datagen.CreateBlock(r, 2, uint32(idxs[1]+offsets[1]), uint32(idxs[1]), testRawCkptData.SecondPart), - } - // create MsgInsertBtcSpvProof for the rawCkpt - msgInsertBtcSpvProof := datagen.GenerateMessageWithRandomSubmitter([]*datagen.BlockCreationResult{btcBlocks[0], btcBlocks[1]}) - - // assign BTC submission key and ProofEpochSubmitted - btcTs.BtcSubmissionKey = &btcctypes.SubmissionKey{ - Key: []*btcctypes.TransactionKey{ - &btcctypes.TransactionKey{Index: uint32(idxs[0]), Hash: btcBlocks[0].HeaderBytes.Hash()}, - &btcctypes.TransactionKey{Index: uint32(idxs[1]), Hash: btcBlocks[1].HeaderBytes.Hash()}, - }, - } - btcTs.Proof.ProofEpochSubmitted = []*btcctypes.TransactionInfo{ - { - Key: btcTs.BtcSubmissionKey.Key[0], - Transaction: msgInsertBtcSpvProof.Proofs[0].BtcTransaction, - Proof: msgInsertBtcSpvProof.Proofs[0].MerkleNodes, - }, - { - Key: btcTs.BtcSubmissionKey.Key[1], - Transaction: msgInsertBtcSpvProof.Proofs[1].BtcTransaction, - Proof: msgInsertBtcSpvProof.Proofs[1].MerkleNodes, - }, - } - - // get headers for verification - btcHeaders := []*wire.BlockHeader{ - btcBlocks[0].HeaderBytes.ToBlockHeader(), - btcBlocks[1].HeaderBytes.ToBlockHeader(), - } - - // net param, babylonTag - powLimit := chaincfg.SimNetParams.PowLimit - babylonTag := btcctypes.DefaultCheckpointTag - tagAsBytes, _ := hex.DecodeString(babylonTag) - - err = btcTs.VerifyStateless(btcHeaders, powLimit, tagAsBytes) - h.NoError(err) - }) 
-} diff --git a/x/zoneconcierge/types/codec.go b/x/zoneconcierge/types/codec.go deleted file mode 100644 index c88ab858..00000000 --- a/x/zoneconcierge/types/codec.go +++ /dev/null @@ -1,19 +0,0 @@ -package types - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -func RegisterCodec(_ *codec.LegacyAmino) {} - -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} - -var ( - Amino = codec.NewLegacyAmino() - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go deleted file mode 100644 index 5a224e55..00000000 --- a/x/zoneconcierge/types/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package types - -import ( - errorsmod "cosmossdk.io/errors" -) - -// x/zoneconcierge module sentinel errors -var ( - ErrInvalidVersion = errorsmod.Register(ModuleName, 1101, "invalid version") - ErrHeaderNotFound = errorsmod.Register(ModuleName, 1102, "no header exists at this height") - ErrInvalidHeader = errorsmod.Register(ModuleName, 1103, "input header is invalid") - ErrChainInfoNotFound = errorsmod.Register(ModuleName, 1104, "no chain info exists") - ErrEpochChainInfoNotFound = errorsmod.Register(ModuleName, 1105, "no chain info exists at this epoch") - ErrEpochHeadersNotFound = errorsmod.Register(ModuleName, 1106, "no timestamped header exists at this epoch") - ErrInvalidProofEpochSealed = errorsmod.Register(ModuleName, 1107, "invalid ProofEpochSealed") - ErrInvalidMerkleProof = errorsmod.Register(ModuleName, 1108, "invalid Merkle inclusion proof") - ErrInvalidChainInfo = errorsmod.Register(ModuleName, 1109, "invalid chain info") - ErrInvalidConsumerIDs = errorsmod.Register(ModuleName, 1110, "chain ids contain duplicates or empty strings") -) diff --git a/x/zoneconcierge/types/events_ibc.go b/x/zoneconcierge/types/events_ibc.go deleted file mode 100644 index 07b499e5..00000000 --- a/x/zoneconcierge/types/events_ibc.go +++ /dev/null @@ -1,9 +0,0 @@ -package types - -// IBC events -const ( - EventTypeAck = "acknowledgement" - - AttributeKeyAckSuccess = "success" - AttributeKeyAckError = "error" -) diff --git a/x/zoneconcierge/types/expected_keepers.go b/x/zoneconcierge/types/expected_keepers.go deleted file mode 100644 index 574a0b7f..00000000 --- a/x/zoneconcierge/types/expected_keepers.go +++ /dev/null @@ -1,111 +0,0 @@ -package types - -import ( - "context" - - bbn "github.com/babylonlabs-io/babylon/types" - clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck - - btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - btclctypes "github.com/babylonlabs-io/babylon/x/btclightclient/types" - checkpointingtypes "github.com/babylonlabs-io/babylon/x/checkpointing/types" - epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types" - ctypes "github.com/cometbft/cometbft/rpc/core/types" - sdk "github.com/cosmos/cosmos-sdk/types" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - connectiontypes "github.com/cosmos/ibc-go/v8/modules/core/03-connection/types" - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" -) - -// AccountKeeper defines the contract required for account APIs. 
-type AccountKeeper interface { - GetModuleAddress(name string) sdk.AccAddress - GetModuleAccount(ctx context.Context, name string) sdk.ModuleAccountI -} - -// BankKeeper defines the expected bank keeper -type BankKeeper interface { - SendCoins(ctx context.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error - MintCoins(ctx context.Context, moduleName string, amt sdk.Coins) error - BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins) error - SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error - SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error - BlockedAddr(addr sdk.AccAddress) bool -} - -// ICS4Wrapper defines the expected ICS4Wrapper for middleware -type ICS4Wrapper interface { - SendPacket( - ctx sdk.Context, - channelCap *capabilitytypes.Capability, - sourcePort string, - sourceChannel string, - timeoutHeight clienttypes.Height, - timeoutTimestamp uint64, - data []byte, - ) (uint64, error) -} - -// ChannelKeeper defines the expected IBC channel keeper -type ChannelKeeper interface { - GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) - GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) - GetAllChannels(ctx sdk.Context) (channels []channeltypes.IdentifiedChannel) - GetChannelClientState(ctx sdk.Context, portID, channelID string) (string, ibcexported.ClientState, error) -} - -// ClientKeeper defines the expected IBC client keeper -type ClientKeeper interface { - GetClientState(ctx sdk.Context, clientID string) (ibcexported.ClientState, bool) - SetClientState(ctx sdk.Context, clientID string, clientState ibcexported.ClientState) -} - -// ConnectionKeeper defines the expected IBC connection keeper -type ConnectionKeeper interface { - GetConnection(ctx sdk.Context, connectionID string) (connection connectiontypes.ConnectionEnd, found bool) -} - -// PortKeeper defines the expected IBC port keeper -type PortKeeper interface { - BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability -} - -// ScopedKeeper defines the expected x/capability scoped keeper interface -type ScopedKeeper interface { - GetCapability(ctx sdk.Context, name string) (*capabilitytypes.Capability, bool) - AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) bool - LookupModules(ctx sdk.Context, name string) ([]string, *capabilitytypes.Capability, error) - ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error -} - -type BTCLightClientKeeper interface { - GetTipInfo(ctx context.Context) *btclctypes.BTCHeaderInfo - GetMainChainFrom(ctx context.Context, startHeight uint64) []*btclctypes.BTCHeaderInfo - GetMainChainUpTo(ctx context.Context, depth uint64) []*btclctypes.BTCHeaderInfo - GetHeaderByHash(ctx context.Context, hash *bbn.BTCHeaderHashBytes) *btclctypes.BTCHeaderInfo -} - -type BtcCheckpointKeeper interface { - GetParams(ctx context.Context) (p btcctypes.Params) - GetEpochData(ctx context.Context, e uint64) *btcctypes.EpochData - GetBestSubmission(ctx context.Context, e uint64) (btcctypes.BtcStatus, *btcctypes.SubmissionKey, error) - GetSubmissionData(ctx context.Context, sk btcctypes.SubmissionKey) *btcctypes.SubmissionData - GetEpochBestSubmissionBtcInfo(ctx context.Context, ed *btcctypes.EpochData) *btcctypes.SubmissionBtcInfo -} - -type CheckpointingKeeper interface { - GetBLSPubKeySet(ctx 
context.Context, epochNumber uint64) ([]*checkpointingtypes.ValidatorWithBlsKey, error) - GetRawCheckpoint(ctx context.Context, epochNumber uint64) (*checkpointingtypes.RawCheckpointWithMeta, error) - GetLastFinalizedEpoch(ctx context.Context) uint64 -} - -type EpochingKeeper interface { - GetHistoricalEpoch(ctx context.Context, epochNumber uint64) (*epochingtypes.Epoch, error) - GetEpoch(ctx context.Context) *epochingtypes.Epoch -} - -// CometClient is a Comet client that allows to query tx inclusion proofs -type CometClient interface { - Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) -} diff --git a/x/zoneconcierge/types/genesis.go b/x/zoneconcierge/types/genesis.go deleted file mode 100644 index 1bf65e10..00000000 --- a/x/zoneconcierge/types/genesis.go +++ /dev/null @@ -1,33 +0,0 @@ -package types - -import ( - host "github.com/cosmos/ibc-go/v8/modules/core/24-host" -) - -// DefaultGenesis returns the default genesis state -func DefaultGenesis() *GenesisState { - return &GenesisState{ - PortId: PortID, - Params: DefaultParams(), - } -} - -// NewGenesis creates a new GenesisState instance -func NewGenesis(params Params) *GenesisState { - return &GenesisState{ - PortId: PortID, - Params: params, - } -} - -// Validate performs basic genesis state validation returning an error upon any -// failure. -func (gs GenesisState) Validate() error { - if err := host.PortIdentifierValidator(gs.PortId); err != nil { - return err - } - if err := gs.Params.Validate(); err != nil { - return err - } - return nil -} diff --git a/x/zoneconcierge/types/genesis.pb.go b/x/zoneconcierge/types/genesis.pb.go deleted file mode 100644 index a0ae3e52..00000000 --- a/x/zoneconcierge/types/genesis.pb.go +++ /dev/null @@ -1,376 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: babylon/zoneconcierge/v1/genesis.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the zoneconcierge module's genesis state. 
-type GenesisState struct { - PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_56f290ad7c2c7dc7, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetPortId() string { - if m != nil { - return m.PortId - } - return "" -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "babylon.zoneconcierge.v1.GenesisState") -} - -func init() { - proto.RegisterFile("babylon/zoneconcierge/v1/genesis.proto", fileDescriptor_56f290ad7c2c7dc7) -} - -var fileDescriptor_56f290ad7c2c7dc7 = []byte{ - // 230 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0x4a, 0x4c, 0xaa, - 0xcc, 0xc9, 0xcf, 0xd3, 0xaf, 0xca, 0xcf, 0x4b, 0x4d, 0xce, 0xcf, 0x4b, 0xce, 0x4c, 0x2d, 0x4a, - 0x4f, 0xd5, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x92, 0x80, 0xaa, 0xd3, 0x43, 0x51, 0xa7, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, - 0x9f, 0x9e, 0x0f, 0x56, 0xa4, 0x0f, 0x62, 0x41, 0xd4, 0x4b, 0xa9, 0xe2, 0x34, 0xb7, 0x20, 0xb1, - 0x28, 0x31, 0x17, 0x6a, 0xac, 0x52, 0x3a, 0x17, 0x8f, 0x3b, 0xc4, 0x9e, 0xe0, 0x92, 0xc4, 0x92, - 0x54, 0x21, 0x71, 0x2e, 0xf6, 0x82, 0xfc, 0xa2, 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x36, 0x10, 0xd7, 0x33, 0x45, 0xc8, 0x8e, 0x8b, 0x0d, 0xa2, 0x51, 0x82, 0x49, - 0x81, 0x51, 0x83, 0xdb, 0x48, 0x41, 0x0f, 0x97, 0x83, 0xf4, 0x02, 0xc0, 0xea, 0x9c, 0x58, 0x4e, - 0xdc, 0x93, 0x67, 0x08, 0x82, 0xea, 0x72, 0x0a, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, - 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, - 0x39, 0x86, 0x28, 0xf3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xa8, - 0x99, 0x39, 0x89, 0x49, 0xc5, 0xba, 0x99, 0xf9, 0x30, 0xae, 0x7e, 0x05, 0x9a, 0x2f, 0x4a, 0x2a, - 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0x5e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x70, - 0xce, 0xb1, 0x43, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.PortId) > 0 { - i -= len(m.PortId) - copy(dAtA[i:], m.PortId) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PortId) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PortId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - 
depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/zoneconcierge/types/genesis_test.go b/x/zoneconcierge/types/genesis_test.go deleted file mode 100644 index 859e66e1..00000000 --- a/x/zoneconcierge/types/genesis_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/stretchr/testify/require" -) - -func TestGenesisState_Validate(t *testing.T) { - for _, tc := range []struct { - desc string - genState *types.GenesisState - valid bool - }{ - { - desc: "default is valid", - genState: types.DefaultGenesis(), - valid: true, - }, - { - desc: "valid genesis state", - genState: &types.GenesisState{ - PortId: types.PortID, - Params: types.Params{IbcPacketTimeoutSeconds: 100}, - }, - valid: true, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - err := tc.genState.Validate() - if tc.valid { - require.NoError(t, err) - } else { - require.Error(t, err) - } - }) - } -} diff --git a/x/zoneconcierge/types/keys.go b/x/zoneconcierge/types/keys.go deleted file mode 100644 index 1c0e0df5..00000000 --- a/x/zoneconcierge/types/keys.go +++ /dev/null @@ -1,43 +0,0 @@ -package types - -import ( - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" -) - -const ( - // ModuleName defines the module name - ModuleName = "zoneconcierge" - - // StoreKey defines the primary module store key - StoreKey = ModuleName - - // RouterKey defines the module's message routing key - RouterKey = ModuleName - - // MemStoreKey defines the in-memory store key - MemStoreKey = "mem_zoneconcierge" - - // Version defines the current version the IBC module supports - Version = "zoneconcierge-1" - - // Ordering defines the ordering the IBC module supports - Ordering = channeltypes.ORDERED - - // PortID is the default port id that module binds to - PortID = "zoneconcierge" -) - -var ( - PortKey = []byte{0x11} // PortKey defines the key to store the port ID in store - ChainInfoKey = []byte{0x12} // ChainInfoKey defines the key to store the chain info for each 
CZ in store - CanonicalChainKey = []byte{0x13} // CanonicalChainKey defines the key to store the canonical chain for each CZ in store - ForkKey = []byte{0x14} // ForkKey defines the key to store the forks for each CZ in store - EpochChainInfoKey = []byte{0x15} // EpochChainInfoKey defines the key to store each epoch's latests chain info for each CZ in store - LastSentBTCSegmentKey = []byte{0x16} // LastSentBTCSegmentKey is key holding last btc light client segment sent to other cosmos zones - ParamsKey = []byte{0x17} // key prefix for the parameters - SealedEpochProofKey = []byte{0x18} // key prefix for proof of sealed epochs -) - -func KeyPrefix(p string) []byte { - return []byte(p) -} diff --git a/x/zoneconcierge/types/mocked_keepers.go b/x/zoneconcierge/types/mocked_keepers.go deleted file mode 100644 index 2daa01fa..00000000 --- a/x/zoneconcierge/types/mocked_keepers.go +++ /dev/null @@ -1,841 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: x/zoneconcierge/types/expected_keepers.go - -// Package types is a generated GoMock package. -package types - -import ( - context "context" - reflect "reflect" - - types "github.com/babylonlabs-io/babylon/types" - types0 "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - types1 "github.com/babylonlabs-io/babylon/x/btclightclient/types" - types2 "github.com/babylonlabs-io/babylon/x/checkpointing/types" - types3 "github.com/babylonlabs-io/babylon/x/epoching/types" - coretypes "github.com/cometbft/cometbft/rpc/core/types" - types4 "github.com/cosmos/cosmos-sdk/types" - types5 "github.com/cosmos/ibc-go/modules/capability/types" - types6 "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" - types7 "github.com/cosmos/ibc-go/v8/modules/core/03-connection/types" - types8 "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - exported "github.com/cosmos/ibc-go/v8/modules/core/exported" - gomock "github.com/golang/mock/gomock" -) - -// MockAccountKeeper is a mock of AccountKeeper interface. -type MockAccountKeeper struct { - ctrl *gomock.Controller - recorder *MockAccountKeeperMockRecorder -} - -// MockAccountKeeperMockRecorder is the mock recorder for MockAccountKeeper. -type MockAccountKeeperMockRecorder struct { - mock *MockAccountKeeper -} - -// NewMockAccountKeeper creates a new mock instance. -func NewMockAccountKeeper(ctrl *gomock.Controller) *MockAccountKeeper { - mock := &MockAccountKeeper{ctrl: ctrl} - mock.recorder = &MockAccountKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAccountKeeper) EXPECT() *MockAccountKeeperMockRecorder { - return m.recorder -} - -// GetModuleAccount mocks base method. -func (m *MockAccountKeeper) GetModuleAccount(ctx context.Context, name string) types4.ModuleAccountI { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetModuleAccount", ctx, name) - ret0, _ := ret[0].(types4.ModuleAccountI) - return ret0 -} - -// GetModuleAccount indicates an expected call of GetModuleAccount. -func (mr *MockAccountKeeperMockRecorder) GetModuleAccount(ctx, name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAccount), ctx, name) -} - -// GetModuleAddress mocks base method. 
-func (m *MockAccountKeeper) GetModuleAddress(name string) types4.AccAddress { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetModuleAddress", name) - ret0, _ := ret[0].(types4.AccAddress) - return ret0 -} - -// GetModuleAddress indicates an expected call of GetModuleAddress. -func (mr *MockAccountKeeperMockRecorder) GetModuleAddress(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAddress", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAddress), name) -} - -// MockBankKeeper is a mock of BankKeeper interface. -type MockBankKeeper struct { - ctrl *gomock.Controller - recorder *MockBankKeeperMockRecorder -} - -// MockBankKeeperMockRecorder is the mock recorder for MockBankKeeper. -type MockBankKeeperMockRecorder struct { - mock *MockBankKeeper -} - -// NewMockBankKeeper creates a new mock instance. -func NewMockBankKeeper(ctrl *gomock.Controller) *MockBankKeeper { - mock := &MockBankKeeper{ctrl: ctrl} - mock.recorder = &MockBankKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBankKeeper) EXPECT() *MockBankKeeperMockRecorder { - return m.recorder -} - -// BlockedAddr mocks base method. -func (m *MockBankKeeper) BlockedAddr(addr types4.AccAddress) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BlockedAddr", addr) - ret0, _ := ret[0].(bool) - return ret0 -} - -// BlockedAddr indicates an expected call of BlockedAddr. -func (mr *MockBankKeeperMockRecorder) BlockedAddr(addr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockedAddr", reflect.TypeOf((*MockBankKeeper)(nil).BlockedAddr), addr) -} - -// BurnCoins mocks base method. -func (m *MockBankKeeper) BurnCoins(ctx context.Context, moduleName string, amt types4.Coins) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BurnCoins", ctx, moduleName, amt) - ret0, _ := ret[0].(error) - return ret0 -} - -// BurnCoins indicates an expected call of BurnCoins. -func (mr *MockBankKeeperMockRecorder) BurnCoins(ctx, moduleName, amt interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BurnCoins", reflect.TypeOf((*MockBankKeeper)(nil).BurnCoins), ctx, moduleName, amt) -} - -// MintCoins mocks base method. -func (m *MockBankKeeper) MintCoins(ctx context.Context, moduleName string, amt types4.Coins) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MintCoins", ctx, moduleName, amt) - ret0, _ := ret[0].(error) - return ret0 -} - -// MintCoins indicates an expected call of MintCoins. -func (mr *MockBankKeeperMockRecorder) MintCoins(ctx, moduleName, amt interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MintCoins", reflect.TypeOf((*MockBankKeeper)(nil).MintCoins), ctx, moduleName, amt) -} - -// SendCoins mocks base method. -func (m *MockBankKeeper) SendCoins(ctx context.Context, fromAddr, toAddr types4.AccAddress, amt types4.Coins) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCoins", ctx, fromAddr, toAddr, amt) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendCoins indicates an expected call of SendCoins. 
-func (mr *MockBankKeeperMockRecorder) SendCoins(ctx, fromAddr, toAddr, amt interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoins", reflect.TypeOf((*MockBankKeeper)(nil).SendCoins), ctx, fromAddr, toAddr, amt) -} - -// SendCoinsFromAccountToModule mocks base method. -func (m *MockBankKeeper) SendCoinsFromAccountToModule(ctx context.Context, senderAddr types4.AccAddress, recipientModule string, amt types4.Coins) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCoinsFromAccountToModule", ctx, senderAddr, recipientModule, amt) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendCoinsFromAccountToModule indicates an expected call of SendCoinsFromAccountToModule. -func (mr *MockBankKeeperMockRecorder) SendCoinsFromAccountToModule(ctx, senderAddr, recipientModule, amt interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromAccountToModule", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromAccountToModule), ctx, senderAddr, recipientModule, amt) -} - -// SendCoinsFromModuleToAccount mocks base method. -func (m *MockBankKeeper) SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr types4.AccAddress, amt types4.Coins) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCoinsFromModuleToAccount", ctx, senderModule, recipientAddr, amt) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendCoinsFromModuleToAccount indicates an expected call of SendCoinsFromModuleToAccount. -func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToAccount(ctx, senderModule, recipientAddr, amt interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToAccount", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToAccount), ctx, senderModule, recipientAddr, amt) -} - -// MockICS4Wrapper is a mock of ICS4Wrapper interface. -type MockICS4Wrapper struct { - ctrl *gomock.Controller - recorder *MockICS4WrapperMockRecorder -} - -// MockICS4WrapperMockRecorder is the mock recorder for MockICS4Wrapper. -type MockICS4WrapperMockRecorder struct { - mock *MockICS4Wrapper -} - -// NewMockICS4Wrapper creates a new mock instance. -func NewMockICS4Wrapper(ctrl *gomock.Controller) *MockICS4Wrapper { - mock := &MockICS4Wrapper{ctrl: ctrl} - mock.recorder = &MockICS4WrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockICS4Wrapper) EXPECT() *MockICS4WrapperMockRecorder { - return m.recorder -} - -// SendPacket mocks base method. -func (m *MockICS4Wrapper) SendPacket(ctx types4.Context, channelCap *types5.Capability, sourcePort, sourceChannel string, timeoutHeight types6.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendPacket", ctx, channelCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SendPacket indicates an expected call of SendPacket. 
-func (mr *MockICS4WrapperMockRecorder) SendPacket(ctx, channelCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacket", reflect.TypeOf((*MockICS4Wrapper)(nil).SendPacket), ctx, channelCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) -} - -// MockChannelKeeper is a mock of ChannelKeeper interface. -type MockChannelKeeper struct { - ctrl *gomock.Controller - recorder *MockChannelKeeperMockRecorder -} - -// MockChannelKeeperMockRecorder is the mock recorder for MockChannelKeeper. -type MockChannelKeeperMockRecorder struct { - mock *MockChannelKeeper -} - -// NewMockChannelKeeper creates a new mock instance. -func NewMockChannelKeeper(ctrl *gomock.Controller) *MockChannelKeeper { - mock := &MockChannelKeeper{ctrl: ctrl} - mock.recorder = &MockChannelKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockChannelKeeper) EXPECT() *MockChannelKeeperMockRecorder { - return m.recorder -} - -// GetAllChannels mocks base method. -func (m *MockChannelKeeper) GetAllChannels(ctx types4.Context) []types8.IdentifiedChannel { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllChannels", ctx) - ret0, _ := ret[0].([]types8.IdentifiedChannel) - return ret0 -} - -// GetAllChannels indicates an expected call of GetAllChannels. -func (mr *MockChannelKeeperMockRecorder) GetAllChannels(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllChannels", reflect.TypeOf((*MockChannelKeeper)(nil).GetAllChannels), ctx) -} - -// GetChannel mocks base method. -func (m *MockChannelKeeper) GetChannel(ctx types4.Context, srcPort, srcChan string) (types8.Channel, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChannel", ctx, srcPort, srcChan) - ret0, _ := ret[0].(types8.Channel) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetChannel indicates an expected call of GetChannel. -func (mr *MockChannelKeeperMockRecorder) GetChannel(ctx, srcPort, srcChan interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChannel", reflect.TypeOf((*MockChannelKeeper)(nil).GetChannel), ctx, srcPort, srcChan) -} - -// GetChannelClientState mocks base method. -func (m *MockChannelKeeper) GetChannelClientState(ctx types4.Context, portID, channelID string) (string, exported.ClientState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChannelClientState", ctx, portID, channelID) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(exported.ClientState) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetChannelClientState indicates an expected call of GetChannelClientState. -func (mr *MockChannelKeeperMockRecorder) GetChannelClientState(ctx, portID, channelID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChannelClientState", reflect.TypeOf((*MockChannelKeeper)(nil).GetChannelClientState), ctx, portID, channelID) -} - -// GetNextSequenceSend mocks base method. 
-func (m *MockChannelKeeper) GetNextSequenceSend(ctx types4.Context, portID, channelID string) (uint64, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextSequenceSend", ctx, portID, channelID) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetNextSequenceSend indicates an expected call of GetNextSequenceSend. -func (mr *MockChannelKeeperMockRecorder) GetNextSequenceSend(ctx, portID, channelID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextSequenceSend", reflect.TypeOf((*MockChannelKeeper)(nil).GetNextSequenceSend), ctx, portID, channelID) -} - -// MockClientKeeper is a mock of ClientKeeper interface. -type MockClientKeeper struct { - ctrl *gomock.Controller - recorder *MockClientKeeperMockRecorder -} - -// MockClientKeeperMockRecorder is the mock recorder for MockClientKeeper. -type MockClientKeeperMockRecorder struct { - mock *MockClientKeeper -} - -// NewMockClientKeeper creates a new mock instance. -func NewMockClientKeeper(ctrl *gomock.Controller) *MockClientKeeper { - mock := &MockClientKeeper{ctrl: ctrl} - mock.recorder = &MockClientKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClientKeeper) EXPECT() *MockClientKeeperMockRecorder { - return m.recorder -} - -// GetClientState mocks base method. -func (m *MockClientKeeper) GetClientState(ctx types4.Context, clientID string) (exported.ClientState, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClientState", ctx, clientID) - ret0, _ := ret[0].(exported.ClientState) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetClientState indicates an expected call of GetClientState. -func (mr *MockClientKeeperMockRecorder) GetClientState(ctx, clientID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClientState", reflect.TypeOf((*MockClientKeeper)(nil).GetClientState), ctx, clientID) -} - -// SetClientState mocks base method. -func (m *MockClientKeeper) SetClientState(ctx types4.Context, clientID string, clientState exported.ClientState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetClientState", ctx, clientID, clientState) -} - -// SetClientState indicates an expected call of SetClientState. -func (mr *MockClientKeeperMockRecorder) SetClientState(ctx, clientID, clientState interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClientState", reflect.TypeOf((*MockClientKeeper)(nil).SetClientState), ctx, clientID, clientState) -} - -// MockConnectionKeeper is a mock of ConnectionKeeper interface. -type MockConnectionKeeper struct { - ctrl *gomock.Controller - recorder *MockConnectionKeeperMockRecorder -} - -// MockConnectionKeeperMockRecorder is the mock recorder for MockConnectionKeeper. -type MockConnectionKeeperMockRecorder struct { - mock *MockConnectionKeeper -} - -// NewMockConnectionKeeper creates a new mock instance. -func NewMockConnectionKeeper(ctrl *gomock.Controller) *MockConnectionKeeper { - mock := &MockConnectionKeeper{ctrl: ctrl} - mock.recorder = &MockConnectionKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockConnectionKeeper) EXPECT() *MockConnectionKeeperMockRecorder { - return m.recorder -} - -// GetConnection mocks base method. 
-func (m *MockConnectionKeeper) GetConnection(ctx types4.Context, connectionID string) (types7.ConnectionEnd, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetConnection", ctx, connectionID) - ret0, _ := ret[0].(types7.ConnectionEnd) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetConnection indicates an expected call of GetConnection. -func (mr *MockConnectionKeeperMockRecorder) GetConnection(ctx, connectionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnection", reflect.TypeOf((*MockConnectionKeeper)(nil).GetConnection), ctx, connectionID) -} - -// MockPortKeeper is a mock of PortKeeper interface. -type MockPortKeeper struct { - ctrl *gomock.Controller - recorder *MockPortKeeperMockRecorder -} - -// MockPortKeeperMockRecorder is the mock recorder for MockPortKeeper. -type MockPortKeeperMockRecorder struct { - mock *MockPortKeeper -} - -// NewMockPortKeeper creates a new mock instance. -func NewMockPortKeeper(ctrl *gomock.Controller) *MockPortKeeper { - mock := &MockPortKeeper{ctrl: ctrl} - mock.recorder = &MockPortKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPortKeeper) EXPECT() *MockPortKeeperMockRecorder { - return m.recorder -} - -// BindPort mocks base method. -func (m *MockPortKeeper) BindPort(ctx types4.Context, portID string) *types5.Capability { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BindPort", ctx, portID) - ret0, _ := ret[0].(*types5.Capability) - return ret0 -} - -// BindPort indicates an expected call of BindPort. -func (mr *MockPortKeeperMockRecorder) BindPort(ctx, portID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BindPort", reflect.TypeOf((*MockPortKeeper)(nil).BindPort), ctx, portID) -} - -// MockScopedKeeper is a mock of ScopedKeeper interface. -type MockScopedKeeper struct { - ctrl *gomock.Controller - recorder *MockScopedKeeperMockRecorder -} - -// MockScopedKeeperMockRecorder is the mock recorder for MockScopedKeeper. -type MockScopedKeeperMockRecorder struct { - mock *MockScopedKeeper -} - -// NewMockScopedKeeper creates a new mock instance. -func NewMockScopedKeeper(ctrl *gomock.Controller) *MockScopedKeeper { - mock := &MockScopedKeeper{ctrl: ctrl} - mock.recorder = &MockScopedKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockScopedKeeper) EXPECT() *MockScopedKeeperMockRecorder { - return m.recorder -} - -// AuthenticateCapability mocks base method. -func (m *MockScopedKeeper) AuthenticateCapability(ctx types4.Context, cap *types5.Capability, name string) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AuthenticateCapability", ctx, cap, name) - ret0, _ := ret[0].(bool) - return ret0 -} - -// AuthenticateCapability indicates an expected call of AuthenticateCapability. -func (mr *MockScopedKeeperMockRecorder) AuthenticateCapability(ctx, cap, name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthenticateCapability", reflect.TypeOf((*MockScopedKeeper)(nil).AuthenticateCapability), ctx, cap, name) -} - -// ClaimCapability mocks base method. 
-func (m *MockScopedKeeper) ClaimCapability(ctx types4.Context, cap *types5.Capability, name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClaimCapability", ctx, cap, name) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClaimCapability indicates an expected call of ClaimCapability. -func (mr *MockScopedKeeperMockRecorder) ClaimCapability(ctx, cap, name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClaimCapability", reflect.TypeOf((*MockScopedKeeper)(nil).ClaimCapability), ctx, cap, name) -} - -// GetCapability mocks base method. -func (m *MockScopedKeeper) GetCapability(ctx types4.Context, name string) (*types5.Capability, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCapability", ctx, name) - ret0, _ := ret[0].(*types5.Capability) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetCapability indicates an expected call of GetCapability. -func (mr *MockScopedKeeperMockRecorder) GetCapability(ctx, name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapability", reflect.TypeOf((*MockScopedKeeper)(nil).GetCapability), ctx, name) -} - -// LookupModules mocks base method. -func (m *MockScopedKeeper) LookupModules(ctx types4.Context, name string) ([]string, *types5.Capability, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LookupModules", ctx, name) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(*types5.Capability) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LookupModules indicates an expected call of LookupModules. -func (mr *MockScopedKeeperMockRecorder) LookupModules(ctx, name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LookupModules", reflect.TypeOf((*MockScopedKeeper)(nil).LookupModules), ctx, name) -} - -// MockBTCLightClientKeeper is a mock of BTCLightClientKeeper interface. -type MockBTCLightClientKeeper struct { - ctrl *gomock.Controller - recorder *MockBTCLightClientKeeperMockRecorder -} - -// MockBTCLightClientKeeperMockRecorder is the mock recorder for MockBTCLightClientKeeper. -type MockBTCLightClientKeeperMockRecorder struct { - mock *MockBTCLightClientKeeper -} - -// NewMockBTCLightClientKeeper creates a new mock instance. -func NewMockBTCLightClientKeeper(ctrl *gomock.Controller) *MockBTCLightClientKeeper { - mock := &MockBTCLightClientKeeper{ctrl: ctrl} - mock.recorder = &MockBTCLightClientKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBTCLightClientKeeper) EXPECT() *MockBTCLightClientKeeperMockRecorder { - return m.recorder -} - -// GetHeaderByHash mocks base method. -func (m *MockBTCLightClientKeeper) GetHeaderByHash(ctx context.Context, hash *types.BTCHeaderHashBytes) *types1.BTCHeaderInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHeaderByHash", ctx, hash) - ret0, _ := ret[0].(*types1.BTCHeaderInfo) - return ret0 -} - -// GetHeaderByHash indicates an expected call of GetHeaderByHash. -func (mr *MockBTCLightClientKeeperMockRecorder) GetHeaderByHash(ctx, hash interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByHash", reflect.TypeOf((*MockBTCLightClientKeeper)(nil).GetHeaderByHash), ctx, hash) -} - -// GetMainChainFrom mocks base method. 
-func (m *MockBTCLightClientKeeper) GetMainChainFrom(ctx context.Context, startHeight uint64) []*types1.BTCHeaderInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMainChainFrom", ctx, startHeight) - ret0, _ := ret[0].([]*types1.BTCHeaderInfo) - return ret0 -} - -// GetMainChainFrom indicates an expected call of GetMainChainFrom. -func (mr *MockBTCLightClientKeeperMockRecorder) GetMainChainFrom(ctx, startHeight interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMainChainFrom", reflect.TypeOf((*MockBTCLightClientKeeper)(nil).GetMainChainFrom), ctx, startHeight) -} - -// GetMainChainUpTo mocks base method. -func (m *MockBTCLightClientKeeper) GetMainChainUpTo(ctx context.Context, depth uint64) []*types1.BTCHeaderInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMainChainUpTo", ctx, depth) - ret0, _ := ret[0].([]*types1.BTCHeaderInfo) - return ret0 -} - -// GetMainChainUpTo indicates an expected call of GetMainChainUpTo. -func (mr *MockBTCLightClientKeeperMockRecorder) GetMainChainUpTo(ctx, depth interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMainChainUpTo", reflect.TypeOf((*MockBTCLightClientKeeper)(nil).GetMainChainUpTo), ctx, depth) -} - -// GetTipInfo mocks base method. -func (m *MockBTCLightClientKeeper) GetTipInfo(ctx context.Context) *types1.BTCHeaderInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTipInfo", ctx) - ret0, _ := ret[0].(*types1.BTCHeaderInfo) - return ret0 -} - -// GetTipInfo indicates an expected call of GetTipInfo. -func (mr *MockBTCLightClientKeeperMockRecorder) GetTipInfo(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTipInfo", reflect.TypeOf((*MockBTCLightClientKeeper)(nil).GetTipInfo), ctx) -} - -// MockBtcCheckpointKeeper is a mock of BtcCheckpointKeeper interface. -type MockBtcCheckpointKeeper struct { - ctrl *gomock.Controller - recorder *MockBtcCheckpointKeeperMockRecorder -} - -// MockBtcCheckpointKeeperMockRecorder is the mock recorder for MockBtcCheckpointKeeper. -type MockBtcCheckpointKeeperMockRecorder struct { - mock *MockBtcCheckpointKeeper -} - -// NewMockBtcCheckpointKeeper creates a new mock instance. -func NewMockBtcCheckpointKeeper(ctrl *gomock.Controller) *MockBtcCheckpointKeeper { - mock := &MockBtcCheckpointKeeper{ctrl: ctrl} - mock.recorder = &MockBtcCheckpointKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBtcCheckpointKeeper) EXPECT() *MockBtcCheckpointKeeperMockRecorder { - return m.recorder -} - -// GetBestSubmission mocks base method. -func (m *MockBtcCheckpointKeeper) GetBestSubmission(ctx context.Context, e uint64) (types0.BtcStatus, *types0.SubmissionKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBestSubmission", ctx, e) - ret0, _ := ret[0].(types0.BtcStatus) - ret1, _ := ret[1].(*types0.SubmissionKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetBestSubmission indicates an expected call of GetBestSubmission. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetBestSubmission(ctx, e interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBestSubmission", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetBestSubmission), ctx, e) -} - -// GetEpochBestSubmissionBtcInfo mocks base method. 
-func (m *MockBtcCheckpointKeeper) GetEpochBestSubmissionBtcInfo(ctx context.Context, ed *types0.EpochData) *types0.SubmissionBtcInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEpochBestSubmissionBtcInfo", ctx, ed) - ret0, _ := ret[0].(*types0.SubmissionBtcInfo) - return ret0 -} - -// GetEpochBestSubmissionBtcInfo indicates an expected call of GetEpochBestSubmissionBtcInfo. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetEpochBestSubmissionBtcInfo(ctx, ed interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochBestSubmissionBtcInfo", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetEpochBestSubmissionBtcInfo), ctx, ed) -} - -// GetEpochData mocks base method. -func (m *MockBtcCheckpointKeeper) GetEpochData(ctx context.Context, e uint64) *types0.EpochData { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEpochData", ctx, e) - ret0, _ := ret[0].(*types0.EpochData) - return ret0 -} - -// GetEpochData indicates an expected call of GetEpochData. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetEpochData(ctx, e interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochData", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetEpochData), ctx, e) -} - -// GetParams mocks base method. -func (m *MockBtcCheckpointKeeper) GetParams(ctx context.Context) types0.Params { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetParams", ctx) - ret0, _ := ret[0].(types0.Params) - return ret0 -} - -// GetParams indicates an expected call of GetParams. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetParams(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParams", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetParams), ctx) -} - -// GetSubmissionData mocks base method. -func (m *MockBtcCheckpointKeeper) GetSubmissionData(ctx context.Context, sk types0.SubmissionKey) *types0.SubmissionData { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubmissionData", ctx, sk) - ret0, _ := ret[0].(*types0.SubmissionData) - return ret0 -} - -// GetSubmissionData indicates an expected call of GetSubmissionData. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetSubmissionData(ctx, sk interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubmissionData", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetSubmissionData), ctx, sk) -} - -// MockCheckpointingKeeper is a mock of CheckpointingKeeper interface. -type MockCheckpointingKeeper struct { - ctrl *gomock.Controller - recorder *MockCheckpointingKeeperMockRecorder -} - -// MockCheckpointingKeeperMockRecorder is the mock recorder for MockCheckpointingKeeper. -type MockCheckpointingKeeperMockRecorder struct { - mock *MockCheckpointingKeeper -} - -// NewMockCheckpointingKeeper creates a new mock instance. -func NewMockCheckpointingKeeper(ctrl *gomock.Controller) *MockCheckpointingKeeper { - mock := &MockCheckpointingKeeper{ctrl: ctrl} - mock.recorder = &MockCheckpointingKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCheckpointingKeeper) EXPECT() *MockCheckpointingKeeperMockRecorder { - return m.recorder -} - -// GetBLSPubKeySet mocks base method. 
-func (m *MockCheckpointingKeeper) GetBLSPubKeySet(ctx context.Context, epochNumber uint64) ([]*types2.ValidatorWithBlsKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBLSPubKeySet", ctx, epochNumber) - ret0, _ := ret[0].([]*types2.ValidatorWithBlsKey) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBLSPubKeySet indicates an expected call of GetBLSPubKeySet. -func (mr *MockCheckpointingKeeperMockRecorder) GetBLSPubKeySet(ctx, epochNumber interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBLSPubKeySet", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetBLSPubKeySet), ctx, epochNumber) -} - -// GetLastFinalizedEpoch mocks base method. -func (m *MockCheckpointingKeeper) GetLastFinalizedEpoch(ctx context.Context) uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastFinalizedEpoch", ctx) - ret0, _ := ret[0].(uint64) - return ret0 -} - -// GetLastFinalizedEpoch indicates an expected call of GetLastFinalizedEpoch. -func (mr *MockCheckpointingKeeperMockRecorder) GetLastFinalizedEpoch(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastFinalizedEpoch", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetLastFinalizedEpoch), ctx) -} - -// GetRawCheckpoint mocks base method. -func (m *MockCheckpointingKeeper) GetRawCheckpoint(ctx context.Context, epochNumber uint64) (*types2.RawCheckpointWithMeta, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRawCheckpoint", ctx, epochNumber) - ret0, _ := ret[0].(*types2.RawCheckpointWithMeta) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRawCheckpoint indicates an expected call of GetRawCheckpoint. -func (mr *MockCheckpointingKeeperMockRecorder) GetRawCheckpoint(ctx, epochNumber interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRawCheckpoint", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetRawCheckpoint), ctx, epochNumber) -} - -// MockEpochingKeeper is a mock of EpochingKeeper interface. -type MockEpochingKeeper struct { - ctrl *gomock.Controller - recorder *MockEpochingKeeperMockRecorder -} - -// MockEpochingKeeperMockRecorder is the mock recorder for MockEpochingKeeper. -type MockEpochingKeeperMockRecorder struct { - mock *MockEpochingKeeper -} - -// NewMockEpochingKeeper creates a new mock instance. -func NewMockEpochingKeeper(ctrl *gomock.Controller) *MockEpochingKeeper { - mock := &MockEpochingKeeper{ctrl: ctrl} - mock.recorder = &MockEpochingKeeperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockEpochingKeeper) EXPECT() *MockEpochingKeeperMockRecorder { - return m.recorder -} - -// GetEpoch mocks base method. -func (m *MockEpochingKeeper) GetEpoch(ctx context.Context) *types3.Epoch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEpoch", ctx) - ret0, _ := ret[0].(*types3.Epoch) - return ret0 -} - -// GetEpoch indicates an expected call of GetEpoch. -func (mr *MockEpochingKeeperMockRecorder) GetEpoch(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpoch", reflect.TypeOf((*MockEpochingKeeper)(nil).GetEpoch), ctx) -} - -// GetHistoricalEpoch mocks base method. 
-func (m *MockEpochingKeeper) GetHistoricalEpoch(ctx context.Context, epochNumber uint64) (*types3.Epoch, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoricalEpoch", ctx, epochNumber) - ret0, _ := ret[0].(*types3.Epoch) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHistoricalEpoch indicates an expected call of GetHistoricalEpoch. -func (mr *MockEpochingKeeperMockRecorder) GetHistoricalEpoch(ctx, epochNumber interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoricalEpoch", reflect.TypeOf((*MockEpochingKeeper)(nil).GetHistoricalEpoch), ctx, epochNumber) -} - -// MockCometClient is a mock of CometClient interface. -type MockCometClient struct { - ctrl *gomock.Controller - recorder *MockCometClientMockRecorder -} - -// MockCometClientMockRecorder is the mock recorder for MockCometClient. -type MockCometClientMockRecorder struct { - mock *MockCometClient -} - -// NewMockCometClient creates a new mock instance. -func NewMockCometClient(ctrl *gomock.Controller) *MockCometClient { - mock := &MockCometClient{ctrl: ctrl} - mock.recorder = &MockCometClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCometClient) EXPECT() *MockCometClientMockRecorder { - return m.recorder -} - -// Tx mocks base method. -func (m *MockCometClient) Tx(ctx context.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tx", ctx, hash, prove) - ret0, _ := ret[0].(*coretypes.ResultTx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Tx indicates an expected call of Tx. -func (mr *MockCometClientMockRecorder) Tx(ctx, hash, prove interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tx", reflect.TypeOf((*MockCometClient)(nil).Tx), ctx, hash, prove) -} diff --git a/x/zoneconcierge/types/msg.go b/x/zoneconcierge/types/msg.go deleted file mode 100644 index 782e3dcc..00000000 --- a/x/zoneconcierge/types/msg.go +++ /dev/null @@ -1,10 +0,0 @@ -package types - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// ensure that these message types implement the sdk.Msg interface -var ( - _ sdk.Msg = &MsgUpdateParams{} -) diff --git a/x/zoneconcierge/types/packet.pb.go b/x/zoneconcierge/types/packet.pb.go deleted file mode 100644 index 1ca32360..00000000 --- a/x/zoneconcierge/types/packet.pb.go +++ /dev/null @@ -1,905 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: babylon/zoneconcierge/v1/packet.proto - -package types - -import ( - fmt "fmt" - types3 "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - types "github.com/babylonlabs-io/babylon/x/btclightclient/types" - types2 "github.com/babylonlabs-io/babylon/x/checkpointing/types" - types1 "github.com/babylonlabs-io/babylon/x/epoching/types" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
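// A minimal sketch of how the mockgen-generated keeper mocks deleted above were
// typically driven in a unit test. The test package name, the location of the removed
// mocks, and the gomock import path (older repos use github.com/golang/mock/gomock,
// newer ones go.uber.org/mock/gomock) are assumptions for illustration only; the mock
// constructor, EXPECT recorder and GetTipInfo method appear in the generated code above.
package zoneconcierge_test

import (
	"testing"

	"github.com/golang/mock/gomock" // assumed import path, see note above

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" // removed in this PR
)

func TestBTCLightClientKeeperMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockBTCLightClientKeeper is one of the generated constructors deleted above.
	btclcKeeper := zctypes.NewMockBTCLightClientKeeper(ctrl)

	// Record an expectation: GetTipInfo must be called exactly once; returning nil
	// keeps the sketch minimal.
	btclcKeeper.EXPECT().GetTipInfo(gomock.Any()).Return(nil).Times(1)

	// The code under test would call the keeper through its interface; here the mock
	// is called directly to satisfy the recorded expectation.
	if info := btclcKeeper.GetTipInfo(nil); info != nil {
		t.Fatalf("expected nil tip info, got %v", info)
	}
}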
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ZoneconciergePacketData is the message that defines the IBC packets of -// ZoneConcierge -type ZoneconciergePacketData struct { - // packet is the actual message carried in the IBC packet - // - // Types that are valid to be assigned to Packet: - // *ZoneconciergePacketData_BtcTimestamp - Packet isZoneconciergePacketData_Packet `protobuf_oneof:"packet"` -} - -func (m *ZoneconciergePacketData) Reset() { *m = ZoneconciergePacketData{} } -func (m *ZoneconciergePacketData) String() string { return proto.CompactTextString(m) } -func (*ZoneconciergePacketData) ProtoMessage() {} -func (*ZoneconciergePacketData) Descriptor() ([]byte, []int) { - return fileDescriptor_be12e124c5c4fdb9, []int{0} -} -func (m *ZoneconciergePacketData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ZoneconciergePacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ZoneconciergePacketData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ZoneconciergePacketData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ZoneconciergePacketData.Merge(m, src) -} -func (m *ZoneconciergePacketData) XXX_Size() int { - return m.Size() -} -func (m *ZoneconciergePacketData) XXX_DiscardUnknown() { - xxx_messageInfo_ZoneconciergePacketData.DiscardUnknown(m) -} - -var xxx_messageInfo_ZoneconciergePacketData proto.InternalMessageInfo - -type isZoneconciergePacketData_Packet interface { - isZoneconciergePacketData_Packet() - MarshalTo([]byte) (int, error) - Size() int -} - -type ZoneconciergePacketData_BtcTimestamp struct { - BtcTimestamp *BTCTimestamp `protobuf:"bytes,1,opt,name=btc_timestamp,json=btcTimestamp,proto3,oneof" json:"btc_timestamp,omitempty"` -} - -func (*ZoneconciergePacketData_BtcTimestamp) isZoneconciergePacketData_Packet() {} - -func (m *ZoneconciergePacketData) GetPacket() isZoneconciergePacketData_Packet { - if m != nil { - return m.Packet - } - return nil -} - -func (m *ZoneconciergePacketData) GetBtcTimestamp() *BTCTimestamp { - if x, ok := m.GetPacket().(*ZoneconciergePacketData_BtcTimestamp); ok { - return x.BtcTimestamp - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ZoneconciergePacketData) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ZoneconciergePacketData_BtcTimestamp)(nil), - } -} - -// BTCTimestamp is a BTC timestamp that carries information of a BTC-finalised epoch -// It includes a number of BTC headers, a raw checkpoint, an epoch metadata, and -// a CZ header if there exists CZ headers checkpointed to this epoch. -// Upon a newly finalised epoch in Babylon, Babylon will send a BTC timestamp to each -// Cosmos zone that has phase-2 integration with Babylon via IBC. 
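// The comment above describes BTCTimestamp, the only payload variant of the
// ZoneconciergePacketData oneof in this file. A minimal sketch of how such a packet was
// assembled and serialized before the module's removal: the package paths and the empty
// Epoch value are assumptions for illustration, while the type, field and method names
// come from the generated code shown in this diff.
package main

import (
	"fmt"

	epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types"
	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" // removed in this PR
)

func main() {
	// Fill only the epoch metadata; the BTC headers, raw checkpoint, submission key
	// and proof fields are left nil to keep the sketch short.
	ts := &zctypes.BTCTimestamp{
		EpochInfo: &epochingtypes.Epoch{},
	}

	// Wrap the timestamp into the oneof carried by the IBC packet.
	packet := &zctypes.ZoneconciergePacketData{
		Packet: &zctypes.ZoneconciergePacketData_BtcTimestamp{BtcTimestamp: ts},
	}

	// Marshal produces the bytes that were sent as the IBC packet data.
	bz, err := packet.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("zoneconcierge packet payload: %d bytes\n", len(bz))
}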
-type BTCTimestamp struct { - // header is the last CZ header in the finalized Babylon epoch - Header *IndexedHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // btc_headers is BTC headers between - // - the block AFTER the common ancestor of BTC tip at epoch `lastFinalizedEpoch-1` and BTC tip at epoch `lastFinalizedEpoch` - // - BTC tip at epoch `lastFinalizedEpoch` - // where `lastFinalizedEpoch` is the last finalised epoch in Babylon - BtcHeaders []*types.BTCHeaderInfo `protobuf:"bytes,2,rep,name=btc_headers,json=btcHeaders,proto3" json:"btc_headers,omitempty"` - // epoch_info is the metadata of the sealed epoch - EpochInfo *types1.Epoch `protobuf:"bytes,3,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` - // raw_checkpoint is the raw checkpoint that seals this epoch - RawCheckpoint *types2.RawCheckpoint `protobuf:"bytes,4,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` - // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch - BtcSubmissionKey *types3.SubmissionKey `protobuf:"bytes,5,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // - //Proofs that the header is finalized - Proof *ProofFinalizedChainInfo `protobuf:"bytes,6,opt,name=proof,proto3" json:"proof,omitempty"` -} - -func (m *BTCTimestamp) Reset() { *m = BTCTimestamp{} } -func (m *BTCTimestamp) String() string { return proto.CompactTextString(m) } -func (*BTCTimestamp) ProtoMessage() {} -func (*BTCTimestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_be12e124c5c4fdb9, []int{1} -} -func (m *BTCTimestamp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BTCTimestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BTCTimestamp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BTCTimestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_BTCTimestamp.Merge(m, src) -} -func (m *BTCTimestamp) XXX_Size() int { - return m.Size() -} -func (m *BTCTimestamp) XXX_DiscardUnknown() { - xxx_messageInfo_BTCTimestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_BTCTimestamp proto.InternalMessageInfo - -func (m *BTCTimestamp) GetHeader() *IndexedHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BTCTimestamp) GetBtcHeaders() []*types.BTCHeaderInfo { - if m != nil { - return m.BtcHeaders - } - return nil -} - -func (m *BTCTimestamp) GetEpochInfo() *types1.Epoch { - if m != nil { - return m.EpochInfo - } - return nil -} - -func (m *BTCTimestamp) GetRawCheckpoint() *types2.RawCheckpoint { - if m != nil { - return m.RawCheckpoint - } - return nil -} - -func (m *BTCTimestamp) GetBtcSubmissionKey() *types3.SubmissionKey { - if m != nil { - return m.BtcSubmissionKey - } - return nil -} - -func (m *BTCTimestamp) GetProof() *ProofFinalizedChainInfo { - if m != nil { - return m.Proof - } - return nil -} - -func init() { - proto.RegisterType((*ZoneconciergePacketData)(nil), "babylon.zoneconcierge.v1.ZoneconciergePacketData") - proto.RegisterType((*BTCTimestamp)(nil), "babylon.zoneconcierge.v1.BTCTimestamp") -} - -func init() { - proto.RegisterFile("babylon/zoneconcierge/v1/packet.proto", fileDescriptor_be12e124c5c4fdb9) -} - -var fileDescriptor_be12e124c5c4fdb9 = []byte{ - // 474 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x6f, 0xd3, 0x30, - 0x18, 0xc6, 0x1b, 0xca, 0x2a, 0x70, 0x37, 0x84, 0x7c, 0x21, 0xda, 0x21, 0x9a, 0x2a, 0x01, 0x45, - 0x1a, 0x8e, 0x32, 0x0e, 0x88, 0x13, 0x52, 0xcb, 0x9f, 0x55, 0x08, 0x34, 0xc2, 0xb8, 0xec, 0x52, - 0xd9, 0xee, 0xdb, 0xc6, 0x6a, 0x6b, 0x47, 0x89, 0xd7, 0xad, 0xfb, 0x14, 0x7c, 0x29, 0x24, 0x8e, - 0x3b, 0x72, 0x44, 0xed, 0x17, 0x41, 0x76, 0xfe, 0x2c, 0xe9, 0x94, 0x4b, 0xe4, 0xf7, 0xc9, 0xcf, - 0x8f, 0xfd, 0x3e, 0x7e, 0xd1, 0x73, 0x46, 0xd9, 0x7a, 0xa1, 0xa4, 0x7f, 0xa3, 0x24, 0x70, 0x25, - 0xb9, 0x80, 0x64, 0x06, 0xfe, 0x2a, 0xf0, 0x63, 0xca, 0xe7, 0xa0, 0x49, 0x9c, 0x28, 0xad, 0xb0, - 0x9b, 0x63, 0xa4, 0x86, 0x91, 0x55, 0x70, 0x78, 0x5c, 0x18, 0x30, 0xcd, 0x79, 0x04, 0x7c, 0x1e, - 0x2b, 0x21, 0xb5, 0x31, 0xa8, 0x09, 0x99, 0xcf, 0xe1, 0xab, 0x82, 0xbe, 0xfb, 0x23, 0xe4, 0xcc, - 0xd0, 0xf7, 0x50, 0x52, 0x31, 0x5e, 0x88, 0x59, 0x64, 0xbe, 0x50, 0x3a, 0x57, 0x94, 0x9c, 0xef, - 0x15, 0x3c, 0xc4, 0x8a, 0x47, 0xb9, 0x6b, 0xb1, 0xce, 0x99, 0xe3, 0xc6, 0x6e, 0xeb, 0x7d, 0x59, - 0xba, 0x97, 0xa0, 0x67, 0x17, 0x55, 0xf9, 0xcc, 0x26, 0xf2, 0x81, 0x6a, 0x8a, 0xbf, 0xa2, 0x03, - 0xa6, 0xf9, 0x58, 0x8b, 0x25, 0xa4, 0x9a, 0x2e, 0x63, 0xd7, 0x39, 0x72, 0xfa, 0xdd, 0x93, 0x17, - 0xa4, 0x29, 0x27, 0x32, 0x38, 0x1f, 0x9e, 0x17, 0xf4, 0x69, 0x2b, 0xdc, 0x67, 0x9a, 0x97, 0xf5, - 0xe0, 0x11, 0xea, 0x64, 0x71, 0xf7, 0x7e, 0xb7, 0xd1, 0x7e, 0x15, 0xc5, 0xef, 0x51, 0x27, 0x02, - 0x3a, 0x81, 0x24, 0x3f, 0xe2, 0x65, 0xf3, 0x11, 0x23, 0x39, 0x81, 0x6b, 0x98, 0x9c, 0x5a, 0x3c, - 0xcc, 0xb7, 0xe1, 0x11, 0xea, 0x9a, 0xab, 0x66, 0x55, 0xea, 0x3e, 0x38, 0x6a, 0xf7, 0xbb, 0x27, - 0xfd, 0xd2, 0x65, 0x27, 0xcb, 0xec, 0xa6, 0x99, 0xc5, 0x48, 0x4e, 0x55, 0x88, 0x98, 0xe6, 0x59, - 0x99, 0xe2, 0x77, 0x08, 0xd9, 0x40, 0xc7, 0x42, 0x4e, 0x95, 0xdb, 0xb6, 0xf7, 0x29, 0xdf, 0x89, - 0x94, 0x59, 0xaf, 0x02, 0xf2, 0xd1, 0xac, 0xc3, 0xc7, 0x56, 0x32, 0x36, 0xf8, 0x1b, 0x7a, 0x92, - 0xd0, 0xab, 0xf1, 0xdd, 0x2b, 0xbb, 0x0f, 0x77, 0xda, 0xa9, 0x4d, 0x84, 0xf1, 0x08, 0xe9, 0xd5, - 0xb0, 0xd4, 0xc2, 0x83, 0xa4, 0x5a, 0xe2, 0x9f, 0x08, 0x9b, 0xae, 0xd2, 0x4b, 0xb6, 0x14, 0x69, - 0x2a, 0x94, 0x1c, 0xcf, 0x61, 0xed, 0xee, 0xed, 0x78, 0xd6, 0x47, 0x70, 0x15, 0x90, 0x1f, 0x25, - 0xff, 0x05, 0xd6, 0xe1, 0x53, 0xa6, 0x79, 0x4d, 0xc1, 0x9f, 0xd1, 0x5e, 0x9c, 0x28, 0x35, 0x75, - 0x3b, 0xd6, 0x29, 0x68, 0x0e, 0xfb, 0xcc, 0x60, 0x9f, 0x84, 0xa4, 0x0b, 0x71, 0x03, 0x93, 0x61, - 0x44, 0x85, 0xb4, 0x79, 0x65, 0xfb, 0x07, 0xdf, 0xff, 0x6c, 0x3c, 0xe7, 0x76, 0xe3, 0x39, 0xff, - 0x36, 0x9e, 0xf3, 0x6b, 0xeb, 0xb5, 0x6e, 0xb7, 0x5e, 0xeb, 0xef, 0xd6, 0x6b, 0x5d, 0xbc, 0x9d, - 0x09, 0x1d, 0x5d, 0x32, 0xc2, 0xd5, 0xd2, 0xcf, 0xdd, 0x17, 0x94, 0xa5, 0xaf, 0x85, 0x2a, 0x4a, - 0xff, 0x7a, 0x67, 0x3e, 0xf5, 0x3a, 0x86, 0x94, 0x75, 0xec, 0x54, 0xbe, 0xf9, 0x1f, 0x00, 0x00, - 0xff, 0xff, 0x7d, 0x8d, 0x6b, 0xdb, 0xb3, 0x03, 0x00, 0x00, -} - -func (m *ZoneconciergePacketData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ZoneconciergePacketData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ZoneconciergePacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Packet != nil { - { - size := m.Packet.Size() - i -= size - if _, err := m.Packet.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - 
i, nil -} - -func (m *ZoneconciergePacketData_BtcTimestamp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ZoneconciergePacketData_BtcTimestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BtcTimestamp != nil { - { - size, err := m.BtcTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *BTCTimestamp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BTCTimestamp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BTCTimestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.BtcSubmissionKey != nil { - { - size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.RawCheckpoint != nil { - { - size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.EpochInfo != nil { - { - size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.BtcHeaders) > 0 { - for iNdEx := len(m.BtcHeaders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.BtcHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPacket(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPacket(dAtA []byte, offset int, v uint64) int { - offset -= sovPacket(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ZoneconciergePacketData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Packet != nil { - n += m.Packet.Size() - } - return n -} - -func (m *ZoneconciergePacketData_BtcTimestamp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BtcTimestamp != nil { - l = m.BtcTimestamp.Size() - n += 1 + l + sovPacket(uint64(l)) - } - return n -} -func (m *BTCTimestamp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovPacket(uint64(l)) - } - if len(m.BtcHeaders) > 0 { - for _, e := range m.BtcHeaders { - l = e.Size() - n += 1 + l + sovPacket(uint64(l)) - } - } - if m.EpochInfo != nil { - l = m.EpochInfo.Size() - n += 1 + l + sovPacket(uint64(l)) - } - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() - n += 1 + l + sovPacket(uint64(l)) - } - if 
m.BtcSubmissionKey != nil { - l = m.BtcSubmissionKey.Size() - n += 1 + l + sovPacket(uint64(l)) - } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovPacket(uint64(l)) - } - return n -} - -func sovPacket(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPacket(x uint64) (n int) { - return sovPacket(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ZoneconciergePacketData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ZoneconciergePacketData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ZoneconciergePacketData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BtcTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &BTCTimestamp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Packet = &ZoneconciergePacketData_BtcTimestamp{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPacket(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPacket - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BTCTimestamp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BTCTimestamp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BTCTimestamp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &IndexedHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BtcHeaders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BtcHeaders = append(m.BtcHeaders, &types.BTCHeaderInfo{}) - if err := m.BtcHeaders[len(m.BtcHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EpochInfo == nil { - m.EpochInfo = &types1.Epoch{} - } - if err := m.EpochInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RawCheckpoint == nil { - m.RawCheckpoint = &types2.RawCheckpoint{} - } - if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BtcSubmissionKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BtcSubmissionKey == nil { - m.BtcSubmissionKey = &types3.SubmissionKey{} - } - if err := m.BtcSubmissionKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPacket - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPacket - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPacket - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Proof == nil { - m.Proof = &ProofFinalizedChainInfo{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPacket(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPacket - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPacket(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPacket - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPacket - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPacket - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPacket - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPacket - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPacket - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPacket = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPacket = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPacket = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/zoneconcierge/types/params.go b/x/zoneconcierge/types/params.go deleted file mode 100644 index 298ade2a..00000000 --- a/x/zoneconcierge/types/params.go +++ /dev/null @@ -1,34 +0,0 @@ -package types - -import ( - "fmt" -) - -const ( - DefaultIbcPacketTimeoutSeconds uint32 = 60 * 60 * 24 // 24 hours - MaxIbcPacketTimeoutSeconds uint32 = 60 * 60 * 24 * 365 // 1 year -) - -// NewParams creates a new Params instance -func NewParams(ibcPacketTimeoutSeconds uint32) Params { - return Params{ - IbcPacketTimeoutSeconds: ibcPacketTimeoutSeconds, - } -} - -// DefaultParams returns a default set of parameters -func DefaultParams() Params { - return NewParams(DefaultIbcPacketTimeoutSeconds) -} - -// Validate validates the set of params -func (p Params) Validate() error { - if p.IbcPacketTimeoutSeconds == 0 { - return fmt.Errorf("IbcPacketTimeoutSeconds must be positive") - } - if p.IbcPacketTimeoutSeconds > MaxIbcPacketTimeoutSeconds { - return fmt.Errorf("IbcPacketTimeoutSeconds must be no larger than %d", MaxIbcPacketTimeoutSeconds) - } - - return nil -} diff --git a/x/zoneconcierge/types/params.pb.go b/x/zoneconcierge/types/params.pb.go deleted file mode 100644 index 732cfcec..00000000 --- a/x/zoneconcierge/types/params.pb.go +++ /dev/null @@ -1,333 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
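// The params.go removed just above bounded the module's only parameter,
// ibc_packet_timeout_seconds: Validate rejects zero and any value above
// MaxIbcPacketTimeoutSeconds (one year), with a 24-hour default. A small sketch of
// those bounds, assuming the pre-removal package path; the constructor, constant and
// method names come from the deleted file itself.
package main

import (
	"fmt"

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" // removed in this PR
)

func main() {
	// The default of 24 hours passes validation.
	fmt.Println(zctypes.DefaultParams().Validate()) // <nil>

	// A zero timeout is rejected as non-positive.
	fmt.Println(zctypes.NewParams(0).Validate()) // error

	// Anything above one year is rejected as well.
	fmt.Println(zctypes.NewParams(zctypes.MaxIbcPacketTimeoutSeconds + 1).Validate()) // error
}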
-// source: babylon/zoneconcierge/v1/params.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the module. -type Params struct { - // ibc_packet_timeout_seconds is the time period after which an unrelayed - // IBC packet becomes timeout, measured in seconds - IbcPacketTimeoutSeconds uint32 `protobuf:"varint,1,opt,name=ibc_packet_timeout_seconds,json=ibcPacketTimeoutSeconds,proto3" json:"ibc_packet_timeout_seconds,omitempty" yaml:"ibc_packet_timeout_seconds"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_c0696c936eb15fe4, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetIbcPacketTimeoutSeconds() uint32 { - if m != nil { - return m.IbcPacketTimeoutSeconds - } - return 0 -} - -func init() { - proto.RegisterType((*Params)(nil), "babylon.zoneconcierge.v1.Params") -} - -func init() { - proto.RegisterFile("babylon/zoneconcierge/v1/params.proto", fileDescriptor_c0696c936eb15fe4) -} - -var fileDescriptor_c0696c936eb15fe4 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0x4a, 0x4c, 0xaa, - 0xcc, 0xc9, 0xcf, 0xd3, 0xaf, 0xca, 0xcf, 0x4b, 0x4d, 0xce, 0xcf, 0x4b, 0xce, 0x4c, 0x2d, 0x4a, - 0x4f, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x92, 0x80, 0x2a, 0xd3, 0x43, 0x51, 0xa6, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, - 0x9e, 0x0f, 0x56, 0xa4, 0x0f, 0x62, 0x41, 0xd4, 0x2b, 0x15, 0x71, 0xb1, 0x05, 0x80, 0xf5, 0x0b, - 0x25, 0x71, 0x49, 0x65, 0x26, 0x25, 0xc7, 0x17, 0x24, 0x26, 0x67, 0xa7, 0x96, 0xc4, 0x97, 0x64, - 0xe6, 0xa6, 0xe6, 0x97, 0x96, 0xc4, 0x17, 0x83, 0x0c, 0x49, 0x29, 0x96, 0x60, 0x54, 0x60, 0xd4, - 0xe0, 0x75, 0x52, 0xfd, 0x74, 0x4f, 0x5e, 0xb1, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0xb7, 0x5a, - 0xa5, 0x20, 0xf1, 0xcc, 0xa4, 0xe4, 0x00, 0xb0, 0x5c, 0x08, 0x44, 0x2a, 0x18, 0x22, 0x63, 0xc5, - 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x53, 0xe0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, - 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, - 0x44, 0x99, 0xa7, 
0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x43, 0x3d, 0x92, - 0x93, 0x98, 0x54, 0xac, 0x9b, 0x99, 0x0f, 0xe3, 0xea, 0x57, 0xa0, 0x05, 0x40, 0x49, 0x65, 0x41, - 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x37, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x2b, 0x77, - 0x59, 0x26, 0x01, 0x00, 0x00, -} - -func (this *Params) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Params) - if !ok { - that2, ok := that.(Params) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.IbcPacketTimeoutSeconds != that1.IbcPacketTimeoutSeconds { - return false - } - return true -} -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IbcPacketTimeoutSeconds != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.IbcPacketTimeoutSeconds)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IbcPacketTimeoutSeconds != 0 { - n += 1 + sovParams(uint64(m.IbcPacketTimeoutSeconds)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IbcPacketTimeoutSeconds", wireType) - } - m.IbcPacketTimeoutSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IbcPacketTimeoutSeconds |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/zoneconcierge/types/params_test.go b/x/zoneconcierge/types/params_test.go deleted file mode 100644 index 863b9cb3..00000000 --- a/x/zoneconcierge/types/params_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" - "github.com/stretchr/testify/require" -) - -func TestParamsEqual(t *testing.T) { - p1 := types.DefaultParams() - p2 := types.DefaultParams() - - ok := p1.Equal(p2) - require.True(t, ok) - - p2.IbcPacketTimeoutSeconds = 100 - - ok = p1.Equal(p2) - require.False(t, ok) -} diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go deleted file mode 100644 index ae44fc9d..00000000 --- a/x/zoneconcierge/types/query.pb.go +++ /dev/null @@ -1,4483 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: babylon/zoneconcierge/v1/query.proto - -package types - -import ( - context "context" - fmt "fmt" - types2 "github.com/babylonlabs-io/babylon/x/btccheckpoint/types" - types1 "github.com/babylonlabs-io/babylon/x/checkpointing/types" - types "github.com/babylonlabs-io/babylon/x/epoching/types" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/cosmos/gogoproto/gogoproto" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryParamsRequest is the request type for the Query/Params RPC method. 
-type QueryParamsRequest struct { -} - -func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } -func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryParamsRequest) ProtoMessage() {} -func (*QueryParamsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{0} -} -func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParamsRequest.Merge(m, src) -} -func (m *QueryParamsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryParamsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo - -// QueryParamsResponse is the response type for the Query/Params RPC method. -type QueryParamsResponse struct { - // params holds all the parameters of this module. - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` -} - -func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } -func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryParamsResponse) ProtoMessage() {} -func (*QueryParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{1} -} -func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParamsResponse.Merge(m, src) -} -func (m *QueryParamsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryParamsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo - -func (m *QueryParamsResponse) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -// QueryHeaderRequest is request type for the Query/Header RPC method. 
-type QueryHeaderRequest struct { - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *QueryHeaderRequest) Reset() { *m = QueryHeaderRequest{} } -func (m *QueryHeaderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryHeaderRequest) ProtoMessage() {} -func (*QueryHeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{2} -} -func (m *QueryHeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryHeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryHeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryHeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryHeaderRequest.Merge(m, src) -} -func (m *QueryHeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryHeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryHeaderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryHeaderRequest proto.InternalMessageInfo - -func (m *QueryHeaderRequest) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *QueryHeaderRequest) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -// QueryHeaderResponse is response type for the Query/Header RPC method. -type QueryHeaderResponse struct { - Header *IndexedHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - ForkHeaders *Forks `protobuf:"bytes,2,opt,name=fork_headers,json=forkHeaders,proto3" json:"fork_headers,omitempty"` -} - -func (m *QueryHeaderResponse) Reset() { *m = QueryHeaderResponse{} } -func (m *QueryHeaderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryHeaderResponse) ProtoMessage() {} -func (*QueryHeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{3} -} -func (m *QueryHeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryHeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryHeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryHeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryHeaderResponse.Merge(m, src) -} -func (m *QueryHeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryHeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryHeaderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryHeaderResponse proto.InternalMessageInfo - -func (m *QueryHeaderResponse) GetHeader() *IndexedHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *QueryHeaderResponse) GetForkHeaders() *Forks { - if m != nil { - return m.ForkHeaders - } - return nil -} - -// QueryChainListRequest is request type for the Query/ChainList RPC method -type QueryChainListRequest struct { - // pagination defines whether to have the pagination in the request - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryChainListRequest) Reset() { *m = 
QueryChainListRequest{} } -func (m *QueryChainListRequest) String() string { return proto.CompactTextString(m) } -func (*QueryChainListRequest) ProtoMessage() {} -func (*QueryChainListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{4} -} -func (m *QueryChainListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryChainListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryChainListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryChainListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryChainListRequest.Merge(m, src) -} -func (m *QueryChainListRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryChainListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryChainListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryChainListRequest proto.InternalMessageInfo - -func (m *QueryChainListRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryChainListResponse is response type for the Query/ChainList RPC method -type QueryChainListResponse struct { - // consumer_ids are IDs of the chains in ascending alphabetical order - ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"` - // pagination defines the pagination in the response - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryChainListResponse) Reset() { *m = QueryChainListResponse{} } -func (m *QueryChainListResponse) String() string { return proto.CompactTextString(m) } -func (*QueryChainListResponse) ProtoMessage() {} -func (*QueryChainListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{5} -} -func (m *QueryChainListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryChainListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryChainListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryChainListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryChainListResponse.Merge(m, src) -} -func (m *QueryChainListResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryChainListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryChainListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryChainListResponse proto.InternalMessageInfo - -func (m *QueryChainListResponse) GetConsumerIds() []string { - if m != nil { - return m.ConsumerIds - } - return nil -} - -func (m *QueryChainListResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryChainsInfoRequest is request type for the Query/ChainsInfo RPC method. 
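// Editorial sketch (not part of the generated file): the ChainList request and
// response above reuse the standard cosmos-sdk PageRequest/PageResponse pair,
// so callers page through consumer IDs by feeding NextKey back into the next
// request. The gRPC address, page size, and import alias zctypes are
// placeholders; NewQueryClient is the constructor generated later in this file,
// and the example only compiles against the pre-removal module.
package main

import (
	"context"
	"fmt"

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"
	query "github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := zctypes.NewQueryClient(conn)
	var nextKey []byte
	for {
		resp, err := client.ChainList(context.Background(), &zctypes.QueryChainListRequest{
			Pagination: &query.PageRequest{Key: nextKey, Limit: 100},
		})
		if err != nil {
			panic(err)
		}
		for _, id := range resp.ConsumerIds {
			fmt.Println(id)
		}
		if resp.Pagination == nil || len(resp.Pagination.NextKey) == 0 {
			break // last page reached
		}
		nextKey = resp.Pagination.NextKey
	}
}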
-type QueryChainsInfoRequest struct { - ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"` -} - -func (m *QueryChainsInfoRequest) Reset() { *m = QueryChainsInfoRequest{} } -func (m *QueryChainsInfoRequest) String() string { return proto.CompactTextString(m) } -func (*QueryChainsInfoRequest) ProtoMessage() {} -func (*QueryChainsInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{6} -} -func (m *QueryChainsInfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryChainsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryChainsInfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryChainsInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryChainsInfoRequest.Merge(m, src) -} -func (m *QueryChainsInfoRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryChainsInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryChainsInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryChainsInfoRequest proto.InternalMessageInfo - -func (m *QueryChainsInfoRequest) GetConsumerIds() []string { - if m != nil { - return m.ConsumerIds - } - return nil -} - -// QueryChainsInfoResponse is response type for the Query/ChainsInfo RPC method. -type QueryChainsInfoResponse struct { - ChainsInfo []*ChainInfo `protobuf:"bytes,1,rep,name=chains_info,json=chainsInfo,proto3" json:"chains_info,omitempty"` -} - -func (m *QueryChainsInfoResponse) Reset() { *m = QueryChainsInfoResponse{} } -func (m *QueryChainsInfoResponse) String() string { return proto.CompactTextString(m) } -func (*QueryChainsInfoResponse) ProtoMessage() {} -func (*QueryChainsInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{7} -} -func (m *QueryChainsInfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryChainsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryChainsInfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryChainsInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryChainsInfoResponse.Merge(m, src) -} -func (m *QueryChainsInfoResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryChainsInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryChainsInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryChainsInfoResponse proto.InternalMessageInfo - -func (m *QueryChainsInfoResponse) GetChainsInfo() []*ChainInfo { - if m != nil { - return m.ChainsInfo - } - return nil -} - -// QueryEpochChainsInfoRequest is request type for the Query/EpochChainsInfo RPC -// method. 
-type QueryEpochChainsInfoRequest struct { - EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` - ConsumerIds []string `protobuf:"bytes,2,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"` -} - -func (m *QueryEpochChainsInfoRequest) Reset() { *m = QueryEpochChainsInfoRequest{} } -func (m *QueryEpochChainsInfoRequest) String() string { return proto.CompactTextString(m) } -func (*QueryEpochChainsInfoRequest) ProtoMessage() {} -func (*QueryEpochChainsInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{8} -} -func (m *QueryEpochChainsInfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryEpochChainsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryEpochChainsInfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryEpochChainsInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryEpochChainsInfoRequest.Merge(m, src) -} -func (m *QueryEpochChainsInfoRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryEpochChainsInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryEpochChainsInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryEpochChainsInfoRequest proto.InternalMessageInfo - -func (m *QueryEpochChainsInfoRequest) GetEpochNum() uint64 { - if m != nil { - return m.EpochNum - } - return 0 -} - -func (m *QueryEpochChainsInfoRequest) GetConsumerIds() []string { - if m != nil { - return m.ConsumerIds - } - return nil -} - -// QueryEpochChainsInfoResponse is response type for the Query/EpochChainsInfo RPC -// method. -type QueryEpochChainsInfoResponse struct { - // chain_info is the info of the CZ - ChainsInfo []*ChainInfo `protobuf:"bytes,1,rep,name=chains_info,json=chainsInfo,proto3" json:"chains_info,omitempty"` -} - -func (m *QueryEpochChainsInfoResponse) Reset() { *m = QueryEpochChainsInfoResponse{} } -func (m *QueryEpochChainsInfoResponse) String() string { return proto.CompactTextString(m) } -func (*QueryEpochChainsInfoResponse) ProtoMessage() {} -func (*QueryEpochChainsInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{9} -} -func (m *QueryEpochChainsInfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryEpochChainsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryEpochChainsInfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryEpochChainsInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryEpochChainsInfoResponse.Merge(m, src) -} -func (m *QueryEpochChainsInfoResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryEpochChainsInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryEpochChainsInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryEpochChainsInfoResponse proto.InternalMessageInfo - -func (m *QueryEpochChainsInfoResponse) GetChainsInfo() []*ChainInfo { - if m != nil { - return m.ChainsInfo - } - return nil -} - -// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. 
-type QueryListHeadersRequest struct { - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - // pagination defines whether to have the pagination in the request - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryListHeadersRequest) Reset() { *m = QueryListHeadersRequest{} } -func (m *QueryListHeadersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryListHeadersRequest) ProtoMessage() {} -func (*QueryListHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{10} -} -func (m *QueryListHeadersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListHeadersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListHeadersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListHeadersRequest.Merge(m, src) -} -func (m *QueryListHeadersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryListHeadersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListHeadersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListHeadersRequest proto.InternalMessageInfo - -func (m *QueryListHeadersRequest) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *QueryListHeadersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryListHeadersResponse is response type for the Query/ListHeaders RPC -// method. 
-type QueryListHeadersResponse struct { - // headers is the list of headers - Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` - // pagination defines the pagination in the response - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryListHeadersResponse) Reset() { *m = QueryListHeadersResponse{} } -func (m *QueryListHeadersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryListHeadersResponse) ProtoMessage() {} -func (*QueryListHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{11} -} -func (m *QueryListHeadersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListHeadersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListHeadersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListHeadersResponse.Merge(m, src) -} -func (m *QueryListHeadersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryListHeadersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListHeadersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListHeadersResponse proto.InternalMessageInfo - -func (m *QueryListHeadersResponse) GetHeaders() []*IndexedHeader { - if m != nil { - return m.Headers - } - return nil -} - -func (m *QueryListHeadersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders -// RPC method. 
-type QueryListEpochHeadersRequest struct { - EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` - ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` -} - -func (m *QueryListEpochHeadersRequest) Reset() { *m = QueryListEpochHeadersRequest{} } -func (m *QueryListEpochHeadersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryListEpochHeadersRequest) ProtoMessage() {} -func (*QueryListEpochHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{12} -} -func (m *QueryListEpochHeadersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListEpochHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListEpochHeadersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListEpochHeadersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListEpochHeadersRequest.Merge(m, src) -} -func (m *QueryListEpochHeadersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryListEpochHeadersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListEpochHeadersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListEpochHeadersRequest proto.InternalMessageInfo - -func (m *QueryListEpochHeadersRequest) GetEpochNum() uint64 { - if m != nil { - return m.EpochNum - } - return 0 -} - -func (m *QueryListEpochHeadersRequest) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders -// RPC method. -type QueryListEpochHeadersResponse struct { - // headers is the list of headers - Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` -} - -func (m *QueryListEpochHeadersResponse) Reset() { *m = QueryListEpochHeadersResponse{} } -func (m *QueryListEpochHeadersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryListEpochHeadersResponse) ProtoMessage() {} -func (*QueryListEpochHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{13} -} -func (m *QueryListEpochHeadersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryListEpochHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryListEpochHeadersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryListEpochHeadersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryListEpochHeadersResponse.Merge(m, src) -} -func (m *QueryListEpochHeadersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryListEpochHeadersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryListEpochHeadersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryListEpochHeadersResponse proto.InternalMessageInfo - -func (m *QueryListEpochHeadersResponse) GetHeaders() []*IndexedHeader { - if m != nil { - return m.Headers - } - return nil -} - -// QueryFinalizedChainsInfoRequest is request type for the -// Query/FinalizedChainsInfo RPC method. 
-type QueryFinalizedChainsInfoRequest struct { - // consumer_ids is the list of ids of CZs - ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"` - // prove indicates whether the querier wants to get proofs of this timestamp - Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` -} - -func (m *QueryFinalizedChainsInfoRequest) Reset() { *m = QueryFinalizedChainsInfoRequest{} } -func (m *QueryFinalizedChainsInfoRequest) String() string { return proto.CompactTextString(m) } -func (*QueryFinalizedChainsInfoRequest) ProtoMessage() {} -func (*QueryFinalizedChainsInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{14} -} -func (m *QueryFinalizedChainsInfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryFinalizedChainsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryFinalizedChainsInfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryFinalizedChainsInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainsInfoRequest.Merge(m, src) -} -func (m *QueryFinalizedChainsInfoRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryFinalizedChainsInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainsInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryFinalizedChainsInfoRequest proto.InternalMessageInfo - -func (m *QueryFinalizedChainsInfoRequest) GetConsumerIds() []string { - if m != nil { - return m.ConsumerIds - } - return nil -} - -func (m *QueryFinalizedChainsInfoRequest) GetProve() bool { - if m != nil { - return m.Prove - } - return false -} - -// QueryFinalizedChainsInfoResponse is response type for the -// Query/FinalizedChainsInfo RPC method. 
-type QueryFinalizedChainsInfoResponse struct { - FinalizedChainsInfo []*FinalizedChainInfo `protobuf:"bytes,1,rep,name=finalized_chains_info,json=finalizedChainsInfo,proto3" json:"finalized_chains_info,omitempty"` -} - -func (m *QueryFinalizedChainsInfoResponse) Reset() { *m = QueryFinalizedChainsInfoResponse{} } -func (m *QueryFinalizedChainsInfoResponse) String() string { return proto.CompactTextString(m) } -func (*QueryFinalizedChainsInfoResponse) ProtoMessage() {} -func (*QueryFinalizedChainsInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{15} -} -func (m *QueryFinalizedChainsInfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryFinalizedChainsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryFinalizedChainsInfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryFinalizedChainsInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainsInfoResponse.Merge(m, src) -} -func (m *QueryFinalizedChainsInfoResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryFinalizedChainsInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainsInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryFinalizedChainsInfoResponse proto.InternalMessageInfo - -func (m *QueryFinalizedChainsInfoResponse) GetFinalizedChainsInfo() []*FinalizedChainInfo { - if m != nil { - return m.FinalizedChainsInfo - } - return nil -} - -// QueryFinalizedChainInfoUntilHeightRequest is request type for the -// Query/FinalizedChainInfoUntilHeight RPC method. 
-type QueryFinalizedChainInfoUntilHeightRequest struct { - // consumer_id is the ID of the CZ - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - // height is the height of the CZ chain - // such that the returned finalised chain info will be no later than this - // height - Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - // prove indicates whether the querier wants to get proofs of this timestamp - Prove bool `protobuf:"varint,3,opt,name=prove,proto3" json:"prove,omitempty"` -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) Reset() { - *m = QueryFinalizedChainInfoUntilHeightRequest{} -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) String() string { - return proto.CompactTextString(m) -} -func (*QueryFinalizedChainInfoUntilHeightRequest) ProtoMessage() {} -func (*QueryFinalizedChainInfoUntilHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{16} -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Merge(m, src) -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest proto.InternalMessageInfo - -func (m *QueryFinalizedChainInfoUntilHeightRequest) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) GetProve() bool { - if m != nil { - return m.Prove - } - return false -} - -// QueryFinalizedChainInfoUntilHeightResponse is response type for the -// Query/FinalizedChainInfoUntilHeight RPC method. 
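// Editorial sketch (not part of the generated file): the request above was
// typically issued through the QueryClient generated further down in this
// file. The consumer ID is a placeholder, the caller is assumed to already
// hold a dialled QueryClient, and the snippet only compiles against the
// pre-removal module.
package example

import (
	"context"
	"fmt"

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"
)

// finalizedInfoUpTo fetches the BTC-finalised info of one consumer chain no
// later than the given height, together with inclusion proofs (Prove: true).
func finalizedInfoUpTo(ctx context.Context, client zctypes.QueryClient, height uint64) error {
	resp, err := client.FinalizedChainInfoUntilHeight(ctx, &zctypes.QueryFinalizedChainInfoUntilHeightRequest{
		ConsumerId: "07-tendermint-0", // placeholder consumer ID
		Height:     height,
		Prove:      true,
	})
	if err != nil {
		return err
	}
	fmt.Println(resp.FinalizedChainInfo, resp.EpochInfo, resp.RawCheckpoint)
	return nil
}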
-type QueryFinalizedChainInfoUntilHeightResponse struct { - // finalized_chain_info is the info of the CZ - FinalizedChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` - // epoch_info is the metadata of the last BTC-finalised epoch - EpochInfo *types.Epoch `protobuf:"bytes,2,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` - // raw_checkpoint is the raw checkpoint of this epoch - RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` - // btc_submission_key is position of two BTC txs that include the raw - // checkpoint of this epoch - BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // proof is the proof that the chain info is finalized - Proof *ProofFinalizedChainInfo `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) Reset() { - *m = QueryFinalizedChainInfoUntilHeightResponse{} -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) String() string { - return proto.CompactTextString(m) -} -func (*QueryFinalizedChainInfoUntilHeightResponse) ProtoMessage() {} -func (*QueryFinalizedChainInfoUntilHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cd665af90102da38, []int{17} -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Merge(m, src) -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse proto.InternalMessageInfo - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetFinalizedChainInfo() *ChainInfo { - if m != nil { - return m.FinalizedChainInfo - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetEpochInfo() *types.Epoch { - if m != nil { - return m.EpochInfo - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetRawCheckpoint() *types1.RawCheckpoint { - if m != nil { - return m.RawCheckpoint - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetBtcSubmissionKey() *types2.SubmissionKey { - if m != nil { - return m.BtcSubmissionKey - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProof() *ProofFinalizedChainInfo { - if m != nil { - return m.Proof - } - return nil -} - -func init() { - proto.RegisterType((*QueryParamsRequest)(nil), "babylon.zoneconcierge.v1.QueryParamsRequest") - proto.RegisterType((*QueryParamsResponse)(nil), "babylon.zoneconcierge.v1.QueryParamsResponse") - proto.RegisterType((*QueryHeaderRequest)(nil), 
"babylon.zoneconcierge.v1.QueryHeaderRequest") - proto.RegisterType((*QueryHeaderResponse)(nil), "babylon.zoneconcierge.v1.QueryHeaderResponse") - proto.RegisterType((*QueryChainListRequest)(nil), "babylon.zoneconcierge.v1.QueryChainListRequest") - proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") - proto.RegisterType((*QueryChainsInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainsInfoRequest") - proto.RegisterType((*QueryChainsInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryChainsInfoResponse") - proto.RegisterType((*QueryEpochChainsInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryEpochChainsInfoRequest") - proto.RegisterType((*QueryEpochChainsInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryEpochChainsInfoResponse") - proto.RegisterType((*QueryListHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListHeadersRequest") - proto.RegisterType((*QueryListHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListHeadersResponse") - proto.RegisterType((*QueryListEpochHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersRequest") - proto.RegisterType((*QueryListEpochHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersResponse") - proto.RegisterType((*QueryFinalizedChainsInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainsInfoRequest") - proto.RegisterType((*QueryFinalizedChainsInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainsInfoResponse") - proto.RegisterType((*QueryFinalizedChainInfoUntilHeightRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightRequest") - proto.RegisterType((*QueryFinalizedChainInfoUntilHeightResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightResponse") -} - -func init() { - proto.RegisterFile("babylon/zoneconcierge/v1/query.proto", fileDescriptor_cd665af90102da38) -} - -var fileDescriptor_cd665af90102da38 = []byte{ - // 1180 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6f, 0xdc, 0x44, - 0x14, 0x8f, 0x37, 0x1f, 0x34, 0x6f, 0x5b, 0xa8, 0x26, 0x69, 0x59, 0xb9, 0xed, 0x26, 0x18, 0x4a, - 0xd3, 0xd2, 0xd8, 0x6c, 0x2a, 0x1a, 0x15, 0x24, 0x10, 0x49, 0x48, 0x1b, 0x28, 0x51, 0x6b, 0xc8, - 0xa5, 0x02, 0x2d, 0xb6, 0x77, 0x76, 0xd7, 0x4a, 0xd6, 0xb3, 0xf5, 0x78, 0xb7, 0x4d, 0xa2, 0x48, - 0xa8, 0xe2, 0x4e, 0x25, 0x2e, 0x88, 0x13, 0x27, 0x6e, 0x20, 0x71, 0xe0, 0x4f, 0x40, 0xea, 0x8d, - 0x4a, 0x5c, 0x38, 0x21, 0x94, 0xf0, 0x87, 0x54, 0x9e, 0x19, 0xef, 0xfa, 0x33, 0x76, 0xd2, 0xdc, - 0xd6, 0x33, 0xef, 0xfd, 0xde, 0xef, 0x7d, 0xcd, 0x7b, 0x0b, 0x6f, 0x99, 0x86, 0xb9, 0xbd, 0x45, - 0x1c, 0x6d, 0x87, 0x38, 0xd8, 0x22, 0x8e, 0x65, 0x63, 0xb7, 0x85, 0xb5, 0x7e, 0x4d, 0x7b, 0xd8, - 0xc3, 0xee, 0xb6, 0xda, 0x75, 0x89, 0x47, 0x50, 0x45, 0x48, 0xa9, 0x11, 0x29, 0xb5, 0x5f, 0x93, - 0xa7, 0x5b, 0xa4, 0x45, 0x98, 0x90, 0xe6, 0xff, 0xe2, 0xf2, 0xf2, 0xc5, 0x16, 0x21, 0xad, 0x2d, - 0xac, 0x19, 0x5d, 0x5b, 0x33, 0x1c, 0x87, 0x78, 0x86, 0x67, 0x13, 0x87, 0x8a, 0xdb, 0x6b, 0x16, - 0xa1, 0x1d, 0x42, 0x35, 0xd3, 0xa0, 0x98, 0x9b, 0xd1, 0xfa, 0x35, 0x13, 0x7b, 0x46, 0x4d, 0xeb, - 0x1a, 0x2d, 0xdb, 0x61, 0xc2, 0x42, 0xf6, 0x7a, 0xc0, 0xcf, 0xf4, 0x2c, 0xab, 0x8d, 0xad, 0xcd, - 0x2e, 0xb1, 0x1d, 0xcf, 0xe7, 0x17, 0x39, 0x10, 0xd2, 0x57, 0x03, 0xe9, 0xe1, 0x8d, 0xed, 0xb4, - 0x7c, 0xe9, 0x84, 0xa8, 0x12, 0x88, 0xe2, 0x2e, 0xb1, 0xda, 0x42, 0x2a, 0xf8, 0x1d, 0x37, 0x9e, - 0x08, 0x4e, 0x34, 0x0e, 0x5c, 0xfa, 0x72, 0xa6, 0x74, 0xd7, 0x70, 0x8d, 0x8e, 0xf0, 0x5e, 0x99, 
- 0x06, 0x74, 0xdf, 0xf7, 0xf9, 0x1e, 0x3b, 0xd4, 0xf1, 0xc3, 0x1e, 0xa6, 0x9e, 0xb2, 0x01, 0x53, - 0x91, 0x53, 0xda, 0x25, 0x0e, 0xc5, 0xe8, 0x43, 0x98, 0xe0, 0xca, 0x15, 0x69, 0x56, 0x9a, 0x2b, - 0x2f, 0xcc, 0xaa, 0x59, 0x99, 0x50, 0xb9, 0xe6, 0xd2, 0xd8, 0xb3, 0x7f, 0x67, 0x46, 0x74, 0xa1, - 0xa5, 0x7c, 0x2e, 0x8c, 0xdd, 0xc1, 0x46, 0x03, 0xbb, 0xc2, 0x18, 0x9a, 0x81, 0xb2, 0x45, 0x1c, - 0xda, 0xeb, 0x60, 0xb7, 0x6e, 0x37, 0x18, 0xf4, 0xa4, 0x0e, 0xc1, 0xd1, 0x5a, 0x03, 0x9d, 0x87, - 0x89, 0x36, 0xb6, 0x5b, 0x6d, 0xaf, 0x52, 0x9a, 0x95, 0xe6, 0xc6, 0x74, 0xf1, 0xa5, 0xfc, 0x24, - 0x09, 0x9a, 0x01, 0x9e, 0xa0, 0xf9, 0x91, 0x2f, 0xef, 0x9f, 0x08, 0x9a, 0x57, 0xb2, 0x69, 0xae, - 0x39, 0x0d, 0xfc, 0x18, 0x37, 0x04, 0x80, 0x50, 0x43, 0x4b, 0x70, 0xba, 0x49, 0xdc, 0xcd, 0x3a, - 0xff, 0xa4, 0xcc, 0x6c, 0x79, 0x61, 0x26, 0x1b, 0x66, 0x95, 0xb8, 0x9b, 0x54, 0x2f, 0xfb, 0x4a, - 0x1c, 0x8a, 0x2a, 0x75, 0x38, 0xc7, 0xb8, 0x2d, 0xb7, 0x0d, 0xdb, 0xb9, 0x6b, 0x53, 0x2f, 0x70, - 0x77, 0x15, 0x60, 0x58, 0x57, 0x82, 0xe1, 0xdb, 0x2a, 0x2f, 0x42, 0xd5, 0x2f, 0x42, 0x95, 0xd7, - 0xba, 0x28, 0x42, 0xf5, 0x9e, 0xd1, 0xc2, 0x42, 0x57, 0x0f, 0x69, 0x2a, 0xdf, 0x49, 0x70, 0x3e, - 0x6e, 0x41, 0x04, 0xe0, 0x0d, 0x38, 0x1d, 0x8a, 0xa8, 0x9f, 0xad, 0xd1, 0xb9, 0x49, 0xbd, 0x3c, - 0x0c, 0x29, 0x45, 0xb7, 0x23, 0x2c, 0x4a, 0x22, 0x4e, 0x79, 0x2c, 0x38, 0x7e, 0x84, 0xc6, 0x07, - 0x61, 0x16, 0x74, 0xcd, 0x69, 0x92, 0xc0, 0xd1, 0x7c, 0x16, 0x4a, 0x1d, 0x5e, 0x4f, 0x28, 0x0b, - 0x1f, 0x56, 0xa0, 0x6c, 0xb1, 0xd3, 0xba, 0xed, 0x34, 0x09, 0x53, 0x2e, 0x2f, 0xbc, 0x99, 0x9d, - 0x02, 0x06, 0xc1, 0x10, 0xc0, 0x1a, 0xa0, 0x29, 0x5f, 0xc3, 0x05, 0x66, 0xe0, 0x13, 0xbf, 0x95, - 0x92, 0x14, 0x2f, 0xc0, 0x24, 0x6b, 0xb2, 0xba, 0xd3, 0xeb, 0xb0, 0x54, 0x8c, 0xe9, 0xa7, 0xd8, - 0xc1, 0x7a, 0xaf, 0x93, 0xe0, 0x5f, 0x4a, 0xf2, 0x6f, 0xc0, 0xc5, 0x74, 0xf8, 0x13, 0x75, 0xe2, - 0x89, 0x24, 0xc2, 0xe4, 0x27, 0x59, 0xd4, 0x57, 0xe1, 0xe6, 0x59, 0x4d, 0x49, 0xf4, 0x71, 0xca, - 0xed, 0x17, 0x09, 0x2a, 0x49, 0x12, 0xc2, 0xcf, 0x8f, 0xe1, 0x95, 0xa0, 0x57, 0xb8, 0x8f, 0x85, - 0x5b, 0x2e, 0xd0, 0x3b, 0xb9, 0x82, 0xfc, 0x4a, 0xe4, 0xc4, 0xe7, 0xc9, 0xf2, 0x12, 0x8b, 0xd8, - 0xa1, 0x39, 0x8f, 0x85, 0xb3, 0x14, 0x0f, 0xa7, 0x62, 0xc2, 0xa5, 0x0c, 0xf4, 0x13, 0x0b, 0x85, - 0xf2, 0x00, 0x66, 0x98, 0x8d, 0x55, 0xdb, 0x31, 0xb6, 0xec, 0x1d, 0xdc, 0x38, 0x4e, 0x6f, 0xa1, - 0x69, 0x18, 0xef, 0xba, 0xa4, 0x8f, 0x99, 0x13, 0xa7, 0x74, 0xfe, 0xe1, 0xbf, 0x1a, 0xb3, 0xd9, - 0xe0, 0xc2, 0x87, 0x6f, 0xe0, 0x5c, 0x33, 0xb8, 0xae, 0x27, 0x0b, 0xf8, 0xfa, 0x21, 0x0f, 0x61, - 0x04, 0x95, 0x81, 0x4e, 0x35, 0x93, 0x96, 0x94, 0x1d, 0xb8, 0x9a, 0xc2, 0xc2, 0xbf, 0xda, 0x70, - 0x3c, 0x7b, 0xeb, 0x0e, 0x7b, 0xe0, 0x5f, 0x76, 0x40, 0x0c, 0x43, 0x30, 0x1a, 0x0e, 0xc1, 0x6f, - 0xa3, 0x70, 0xad, 0x88, 0x71, 0x11, 0x8c, 0x0d, 0x98, 0x8e, 0x05, 0x23, 0x88, 0x85, 0x54, 0xb4, - 0x99, 0x51, 0x33, 0x61, 0x09, 0xdd, 0x02, 0xe0, 0x65, 0xc8, 0xc0, 0x78, 0xbd, 0xcb, 0x03, 0xb0, - 0xc1, 0xe8, 0xef, 0xd7, 0x54, 0x56, 0x66, 0x3a, 0x2f, 0x5a, 0xa6, 0xba, 0x0e, 0xaf, 0xba, 0xc6, - 0xa3, 0xfa, 0x70, 0x89, 0x60, 0xfe, 0x85, 0x2b, 0x2d, 0xb2, 0x70, 0xf8, 0x18, 0xba, 0xf1, 0x68, - 0x79, 0x70, 0xa6, 0x9f, 0x71, 0xc3, 0x9f, 0x68, 0x03, 0x90, 0xe9, 0x59, 0x75, 0xda, 0x33, 0x3b, - 0x36, 0xa5, 0x36, 0x71, 0xea, 0x9b, 0x78, 0xbb, 0x32, 0x16, 0xc3, 0x8c, 0x6e, 0x38, 0xfd, 0x9a, - 0xfa, 0xc5, 0x40, 0xfe, 0x33, 0xbc, 0xad, 0x9f, 0x35, 0x3d, 0x2b, 0x72, 0x82, 0x6e, 0xb3, 0xe8, - 0x93, 0x66, 0x65, 0x9c, 0x21, 0xd5, 0x0e, 0x59, 0x16, 0x7c, 0xb1, 0x94, 0xd2, 0xe1, 0xfa, 0x0b, - 0x4f, 0xcf, 0xc0, 0x38, 
0x4b, 0x18, 0xfa, 0x5e, 0x82, 0x09, 0xbe, 0x59, 0xa0, 0x43, 0x8a, 0x30, - 0xb9, 0xd0, 0xc8, 0xf3, 0x05, 0xa5, 0x79, 0xce, 0x95, 0xb9, 0x27, 0x7f, 0xff, 0xff, 0x43, 0x49, - 0x41, 0xb3, 0x5a, 0xce, 0x16, 0x85, 0x7e, 0x97, 0x60, 0x82, 0xf7, 0x6f, 0x2e, 0xa3, 0xc8, 0xd6, - 0x93, 0xcb, 0x28, 0xba, 0xd3, 0x28, 0x9f, 0x32, 0x46, 0x2b, 0x68, 0x29, 0x9b, 0xd1, 0xb0, 0x36, - 0xb5, 0xdd, 0x50, 0xbf, 0xec, 0x69, 0xfc, 0x5d, 0xd1, 0x76, 0x79, 0x57, 0xec, 0xa1, 0x1f, 0x25, - 0x98, 0x1c, 0x2c, 0x0d, 0x48, 0xcb, 0x21, 0x12, 0x5f, 0x60, 0xe4, 0x77, 0x8b, 0x2b, 0x14, 0x0f, - 0x27, 0x7f, 0x65, 0xd0, 0xcf, 0x12, 0xc0, 0xf0, 0x99, 0x40, 0x85, 0x4c, 0x85, 0x1f, 0x46, 0xb9, - 0x76, 0x04, 0x0d, 0xc1, 0x6e, 0x9e, 0xb1, 0xbb, 0x82, 0x2e, 0xe7, 0xb1, 0x63, 0xb1, 0x45, 0x7f, - 0x48, 0xf0, 0x5a, 0x6c, 0xde, 0xa3, 0xf7, 0x72, 0xac, 0xa6, 0xaf, 0x1f, 0xf2, 0xcd, 0xa3, 0xaa, - 0x09, 0xc6, 0x37, 0x18, 0xe3, 0x79, 0xf4, 0x4e, 0x36, 0x63, 0xfe, 0xb6, 0x84, 0x79, 0xff, 0x2a, - 0x41, 0x39, 0x34, 0xbb, 0x51, 0x5e, 0xa4, 0x92, 0xcb, 0x86, 0xbc, 0x70, 0x14, 0x15, 0xc1, 0x75, - 0x91, 0x71, 0xad, 0x21, 0x2d, 0x9b, 0xab, 0x98, 0x7b, 0xd1, 0xaa, 0x45, 0x7f, 0x49, 0x70, 0x36, - 0x3e, 0x65, 0xd1, 0xcd, 0x02, 0x0c, 0x52, 0x86, 0xbe, 0xbc, 0x78, 0x64, 0xbd, 0xe2, 0x7d, 0x97, - 0x4a, 0x9f, 0x27, 0x80, 0x6a, 0xbb, 0x83, 0x5d, 0x63, 0x0f, 0xfd, 0x29, 0xc1, 0x54, 0xca, 0xd8, - 0x45, 0xb7, 0x72, 0xc8, 0x65, 0xef, 0x01, 0xf2, 0xfb, 0xc7, 0x51, 0x2d, 0x9e, 0x99, 0xd4, 0x2d, - 0x00, 0x7d, 0x5b, 0x82, 0x4b, 0x87, 0xce, 0x4e, 0xb4, 0x7c, 0x24, 0x5a, 0xe9, 0x63, 0x5f, 0x5e, - 0x79, 0x39, 0x10, 0xe1, 0xe5, 0x97, 0xcc, 0xcb, 0x75, 0x74, 0xb7, 0xb0, 0x97, 0xe9, 0x4f, 0xa8, - 0x0f, 0x3a, 0x78, 0x42, 0x97, 0xee, 0x3f, 0xdb, 0xaf, 0x4a, 0xcf, 0xf7, 0xab, 0xd2, 0x7f, 0xfb, - 0x55, 0xe9, 0xe9, 0x41, 0x75, 0xe4, 0xf9, 0x41, 0x75, 0xe4, 0x9f, 0x83, 0xea, 0xc8, 0x83, 0xc5, - 0x96, 0xed, 0xb5, 0x7b, 0xa6, 0x6a, 0x91, 0x4e, 0x60, 0x71, 0xcb, 0x30, 0xe9, 0xbc, 0x4d, 0x06, - 0x04, 0x1e, 0xc7, 0x28, 0x78, 0xdb, 0x5d, 0x4c, 0xcd, 0x09, 0xf6, 0x87, 0xfc, 0xc6, 0x8b, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x95, 0x86, 0xeb, 0x57, 0x04, 0x11, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Params queries the parameters of the module. - Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // Header queries the CZ header and fork headers at a given height. 
- Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) - // ChainList queries the list of chains that checkpoint to Babylon - ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) - // ChainsInfo queries the latest info for a given list of chains in Babylon's view - ChainsInfo(ctx context.Context, in *QueryChainsInfoRequest, opts ...grpc.CallOption) (*QueryChainsInfoResponse, error) - // EpochChainsInfo queries the latest info for a list of chains - // in a given epoch in Babylon's view - EpochChainsInfo(ctx context.Context, in *QueryEpochChainsInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainsInfoResponse, error) - // ListHeaders queries the headers of a chain in Babylon's view, with - // pagination support - ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) - // ListEpochHeaders queries the headers of a chain timestamped in a given - // epoch of Babylon, with pagination support - ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) - // FinalizedChainsInfo queries the BTC-finalised info of chains with given IDs, with proofs - FinalizedChainsInfo(ctx context.Context, in *QueryFinalizedChainsInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainsInfoResponse, error) - // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than - // the provided CZ height, with proofs - FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { - out := new(QueryParamsResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Params", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) { - out := new(QueryHeaderResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Header", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) { - out := new(QueryChainListResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ChainsInfo(ctx context.Context, in *QueryChainsInfoRequest, opts ...grpc.CallOption) (*QueryChainsInfoResponse, error) { - out := new(QueryChainsInfoResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainsInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) EpochChainsInfo(ctx context.Context, in *QueryEpochChainsInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainsInfoResponse, error) { - out := new(QueryEpochChainsInfoResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/EpochChainsInfo", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) { - out := new(QueryListHeadersResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListHeaders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) { - out := new(QueryListEpochHeadersResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) FinalizedChainsInfo(ctx context.Context, in *QueryFinalizedChainsInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainsInfoResponse, error) { - out := new(QueryFinalizedChainsInfoResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainsInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) { - out := new(QueryFinalizedChainInfoUntilHeightResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Params queries the parameters of the module. - Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // Header queries the CZ header and fork headers at a given height. - Header(context.Context, *QueryHeaderRequest) (*QueryHeaderResponse, error) - // ChainList queries the list of chains that checkpoint to Babylon - ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) - // ChainsInfo queries the latest info for a given list of chains in Babylon's view - ChainsInfo(context.Context, *QueryChainsInfoRequest) (*QueryChainsInfoResponse, error) - // EpochChainsInfo queries the latest info for a list of chains - // in a given epoch in Babylon's view - EpochChainsInfo(context.Context, *QueryEpochChainsInfoRequest) (*QueryEpochChainsInfoResponse, error) - // ListHeaders queries the headers of a chain in Babylon's view, with - // pagination support - ListHeaders(context.Context, *QueryListHeadersRequest) (*QueryListHeadersResponse, error) - // ListEpochHeaders queries the headers of a chain timestamped in a given - // epoch of Babylon, with pagination support - ListEpochHeaders(context.Context, *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) - // FinalizedChainsInfo queries the BTC-finalised info of chains with given IDs, with proofs - FinalizedChainsInfo(context.Context, *QueryFinalizedChainsInfoRequest) (*QueryFinalizedChainsInfoResponse, error) - // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than - // the provided CZ height, with proofs - FinalizedChainInfoUntilHeight(context.Context, *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
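// Editorial sketch (not part of the generated file): as the comment above
// notes, embedding UnimplementedQueryServer keeps a partial implementation
// forward compatible, since every RPC that is not overridden falls back to a
// stub returning codes.Unimplemented. The server type, its single overridden
// method, and the package path are illustrative assumptions against the
// pre-removal module.
package example

import (
	"context"

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types"
)

// paramsOnlyServer answers only the Params RPC; all other Query RPCs resolve
// to the embedded UnimplementedQueryServer stubs.
type paramsOnlyServer struct {
	zctypes.UnimplementedQueryServer
}

func (s *paramsOnlyServer) Params(ctx context.Context, req *zctypes.QueryParamsRequest) (*zctypes.QueryParamsResponse, error) {
	return &zctypes.QueryParamsResponse{Params: zctypes.DefaultParams()}, nil
}

// A &paramsOnlyServer{} value would then be passed to RegisterQueryServer.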
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") -} -func (*UnimplementedQueryServer) Header(ctx context.Context, req *QueryHeaderRequest) (*QueryHeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Header not implemented") -} -func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainListRequest) (*QueryChainListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChainList not implemented") -} -func (*UnimplementedQueryServer) ChainsInfo(ctx context.Context, req *QueryChainsInfoRequest) (*QueryChainsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChainsInfo not implemented") -} -func (*UnimplementedQueryServer) EpochChainsInfo(ctx context.Context, req *QueryEpochChainsInfoRequest) (*QueryEpochChainsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EpochChainsInfo not implemented") -} -func (*UnimplementedQueryServer) ListHeaders(ctx context.Context, req *QueryListHeadersRequest) (*QueryListHeadersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListHeaders not implemented") -} -func (*UnimplementedQueryServer) ListEpochHeaders(ctx context.Context, req *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListEpochHeaders not implemented") -} -func (*UnimplementedQueryServer) FinalizedChainsInfo(ctx context.Context, req *QueryFinalizedChainsInfoRequest) (*QueryFinalizedChainsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainsInfo not implemented") -} -func (*UnimplementedQueryServer) FinalizedChainInfoUntilHeight(ctx context.Context, req *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfoUntilHeight not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/Params", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Header_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryHeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Header(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/Header", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Header(ctx, req.(*QueryHeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ChainList_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryChainListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ChainList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ChainList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ChainList(ctx, req.(*QueryChainListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ChainsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryChainsInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ChainsInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ChainsInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ChainsInfo(ctx, req.(*QueryChainsInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_EpochChainsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryEpochChainsInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).EpochChainsInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/EpochChainsInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).EpochChainsInfo(ctx, req.(*QueryEpochChainsInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ListHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryListHeadersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ListHeaders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ListHeaders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ListHeaders(ctx, req.(*QueryListHeadersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ListEpochHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryListEpochHeadersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ListEpochHeaders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ListEpochHeaders(ctx, req.(*QueryListEpochHeadersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_FinalizedChainsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryFinalizedChainsInfoRequest) - if err := dec(in); err != nil { - return nil, err 
- } - if interceptor == nil { - return srv.(QueryServer).FinalizedChainsInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainsInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).FinalizedChainsInfo(ctx, req.(*QueryFinalizedChainsInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_FinalizedChainInfoUntilHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryFinalizedChainInfoUntilHeightRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, req.(*QueryFinalizedChainInfoUntilHeightRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "babylon.zoneconcierge.v1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Params", - Handler: _Query_Params_Handler, - }, - { - MethodName: "Header", - Handler: _Query_Header_Handler, - }, - { - MethodName: "ChainList", - Handler: _Query_ChainList_Handler, - }, - { - MethodName: "ChainsInfo", - Handler: _Query_ChainsInfo_Handler, - }, - { - MethodName: "EpochChainsInfo", - Handler: _Query_EpochChainsInfo_Handler, - }, - { - MethodName: "ListHeaders", - Handler: _Query_ListHeaders_Handler, - }, - { - MethodName: "ListEpochHeaders", - Handler: _Query_ListEpochHeaders_Handler, - }, - { - MethodName: "FinalizedChainsInfo", - Handler: _Query_FinalizedChainsInfo_Handler, - }, - { - MethodName: "FinalizedChainInfoUntilHeight", - Handler: _Query_FinalizedChainInfoUntilHeight_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "babylon/zoneconcierge/v1/query.proto", -} - -func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryHeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryHeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryHeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Height != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryHeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryHeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryHeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ForkHeaders != nil { - { - size, err := m.ForkHeaders.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryChainListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryChainListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryChainListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryChainListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryChainListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryChainListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ConsumerIds) > 0 { - for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ConsumerIds[iNdEx]) - copy(dAtA[i:], m.ConsumerIds[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryChainsInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
- if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryChainsInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConsumerIds) > 0 { - for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ConsumerIds[iNdEx]) - copy(dAtA[i:], m.ConsumerIds[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryChainsInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryChainsInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryChainsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChainsInfo) > 0 { - for iNdEx := len(m.ChainsInfo) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainsInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryEpochChainsInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryEpochChainsInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryEpochChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConsumerIds) > 0 { - for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ConsumerIds[iNdEx]) - copy(dAtA[i:], m.ConsumerIds[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.EpochNum != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryEpochChainsInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryEpochChainsInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryEpochChainsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChainsInfo) > 0 { - for iNdEx := len(m.ChainsInfo) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChainsInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryListHeadersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListHeadersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryListHeadersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListHeadersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryListEpochHeadersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListEpochHeadersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListEpochHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0x12 - } - if m.EpochNum != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryListEpochHeadersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryListEpochHeadersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryListEpochHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryFinalizedChainsInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*QueryFinalizedChainsInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryFinalizedChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Prove { - i-- - if m.Prove { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ConsumerIds) > 0 { - for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ConsumerIds[iNdEx]) - copy(dAtA[i:], m.ConsumerIds[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryFinalizedChainsInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryFinalizedChainsInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryFinalizedChainsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.FinalizedChainsInfo) > 0 { - for iNdEx := len(m.FinalizedChainsInfo) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FinalizedChainsInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Prove { - i-- - if m.Prove { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Height != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.BtcSubmissionKey != nil { - { - size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.RawCheckpoint != 
nil { - { - size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.EpochInfo != nil { - { - size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FinalizedChainInfo != nil { - { - size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryParamsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *QueryParamsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryHeaderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovQuery(uint64(m.Height)) - } - return n -} - -func (m *QueryHeaderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.ForkHeaders != nil { - l = m.ForkHeaders.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryChainListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryChainListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ConsumerIds) > 0 { - for _, s := range m.ConsumerIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryChainsInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ConsumerIds) > 0 { - for _, s := range m.ConsumerIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryChainsInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainsInfo) > 0 { - for _, e := range m.ChainsInfo { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryEpochChainsInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) - } - if len(m.ConsumerIds) > 0 { - for _, s := range m.ConsumerIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryEpochChainsInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChainsInfo) > 0 { - for _, e := range m.ChainsInfo { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryListHeadersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = 
m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryListHeadersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryListEpochHeadersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) - } - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryListEpochHeadersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryFinalizedChainsInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ConsumerIds) > 0 { - for _, s := range m.ConsumerIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Prove { - n += 2 - } - return n -} - -func (m *QueryFinalizedChainsInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.FinalizedChainsInfo) > 0 { - for _, e := range m.FinalizedChainsInfo { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - return n -} - -func (m *QueryFinalizedChainInfoUntilHeightRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovQuery(uint64(m.Height)) - } - if m.Prove { - n += 2 - } - return n -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FinalizedChainInfo != nil { - l = m.FinalizedChainInfo.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.EpochInfo != nil { - l = m.EpochInfo.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.BtcSubmissionKey != nil { - l = m.BtcSubmissionKey.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryHeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryHeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryHeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := 
skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryHeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryHeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryHeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &IndexedHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForkHeaders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ForkHeaders == nil { - m.ForkHeaders = &Forks{} - } - if err := m.ForkHeaders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryChainListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryChainListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryChainsInfoRequest) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryChainsInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryChainsInfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryChainsInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainsInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainsInfo = append(m.ChainsInfo, &ChainInfo{}) - if err := m.ChainsInfo[len(m.ChainsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryEpochChainsInfoRequest) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryEpochChainsInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryEpochChainsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) - } - m.EpochNum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochNum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryEpochChainsInfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryEpochChainsInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryEpochChainsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainsInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainsInfo = append(m.ChainsInfo, &ChainInfo{}) - if err := m.ChainsInfo[len(m.ChainsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListHeadersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListHeadersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListHeadersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &IndexedHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListEpochHeadersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListEpochHeadersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListEpochHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) - } - m.EpochNum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EpochNum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerId = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryListEpochHeadersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryListEpochHeadersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryListEpochHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &IndexedHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryFinalizedChainsInfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryFinalizedChainsInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinalizedChainsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerIds 
= append(m.ConsumerIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryFinalizedChainsInfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryFinalizedChainsInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinalizedChainsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinalizedChainsInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FinalizedChainsInfo = append(m.FinalizedChainsInfo, &FinalizedChainInfo{}) - if err := m.FinalizedChainsInfo[len(m.FinalizedChainsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryFinalizedChainInfoUntilHeightRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryFinalizedChainInfoUntilHeightResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinalizedChainInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FinalizedChainInfo == nil { - m.FinalizedChainInfo = &ChainInfo{} - } - if err := m.FinalizedChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EpochInfo == nil { - m.EpochInfo = &types.Epoch{} - } - if err := m.EpochInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RawCheckpoint == nil { - m.RawCheckpoint = &types1.RawCheckpoint{} - } - if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BtcSubmissionKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BtcSubmissionKey == nil { - m.BtcSubmissionKey = &types2.SubmissionKey{} - } - if err := m.BtcSubmissionKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Proof == nil { - m.Proof = &ProofFinalizedChainInfo{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if 
dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go deleted file mode 100644 index de98ae8a..00000000 --- a/x/zoneconcierge/types/query.pb.gw.go +++ /dev/null @@ -1,991 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: babylon/zoneconcierge/v1/query.proto - -/* -Package types is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package types - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryParamsRequest - var metadata runtime.ServerMetadata - - msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryParamsRequest - var metadata runtime.ServerMetadata - - msg, err := server.Params(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryHeaderRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["height"] - if !ok { - return nil, 
metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") - } - - protoReq.Height, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) - } - - msg, err := client.Header(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryHeaderRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["height"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") - } - - protoReq.Height, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) - } - - msg, err := server.Header(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ChainList_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainListRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ChainList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainListRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ChainList(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ChainsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_ChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ChainsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ChainsInfo(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_EpochChainsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_EpochChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryEpochChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.EpochChainsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_EpochChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryEpochChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.EpochChainsInfo(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"consumer_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryListHeadersRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryListHeadersRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListHeaders(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryListEpochHeadersRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") - } - - protoReq.EpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) - } - - msg, err := client.ListEpochHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryListEpochHeadersRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") - } - - 
protoReq.EpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) - } - - msg, err := server.ListEpochHeaders(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_FinalizedChainsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_FinalizedChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.FinalizedChainsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_FinalizedChainsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainsInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainsInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.FinalizedChainsInfo(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_FinalizedChainInfoUntilHeight_0 = &utilities.DoubleArray{Encoding: map[string]int{"consumer_id": 0, "height": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} -) - -func request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainInfoUntilHeightRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["height"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") - } - - protoReq.Height, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.FinalizedChainInfoUntilHeight(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainInfoUntilHeightRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["consumer_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id") - } - - protoReq.ConsumerId, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err) - } - - val, ok = pathParams["height"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") - } - - protoReq.Height, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.FinalizedChainInfoUntilHeight(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Header_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ChainList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ChainList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ChainsInfo_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_EpochChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_EpochChainsInfo_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_EpochChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ListHeaders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_FinalizedChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_FinalizedChainsInfo_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_FinalizedChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Header_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ChainList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ChainList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ChainsInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_EpochChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_EpochChainsInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_EpochChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ListHeaders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_FinalizedChainsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_FinalizedChainsInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_FinalizedChainsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_Header_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "consumer_id", "header", "height"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_ChainList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "chains"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_ChainsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "chains_info"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_EpochChainsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "epoch_chains_info"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "consumer_id"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "consumer_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_FinalizedChainsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "finalized_chains_info"}, "", runtime.AssumeColonVerbOpt(false))) - - pattern_Query_FinalizedChainInfoUntilHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 5}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "consumer_id", "height"}, "", runtime.AssumeColonVerbOpt(false))) -) - -var ( - forward_Query_Params_0 = runtime.ForwardResponseMessage - - forward_Query_Header_0 = runtime.ForwardResponseMessage - - forward_Query_ChainList_0 = runtime.ForwardResponseMessage - - forward_Query_ChainsInfo_0 = runtime.ForwardResponseMessage - - forward_Query_EpochChainsInfo_0 = runtime.ForwardResponseMessage - - forward_Query_ListHeaders_0 = runtime.ForwardResponseMessage - - forward_Query_ListEpochHeaders_0 = runtime.ForwardResponseMessage - - forward_Query_FinalizedChainsInfo_0 = runtime.ForwardResponseMessage - - 
forward_Query_FinalizedChainInfoUntilHeight_0 = runtime.ForwardResponseMessage -) diff --git a/x/zoneconcierge/types/tx.pb.go b/x/zoneconcierge/types/tx.pb.go deleted file mode 100644 index d9f61716..00000000 --- a/x/zoneconcierge/types/tx.pb.go +++ /dev/null @@ -1,596 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: babylon/zoneconcierge/v1/tx.proto - -package types - -import ( - context "context" - fmt "fmt" - _ "github.com/cosmos/cosmos-proto" - _ "github.com/cosmos/cosmos-sdk/types/msgservice" - _ "github.com/cosmos/gogoproto/gogoproto" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MsgUpdateParams defines a message for updating zoneconcierge module parameters. -type MsgUpdateParams struct { - // authority is the address of the governance account. - // just FYI: cosmos.AddressString marks that this field should use type alias - // for AddressString instead of string, but the functionality is not yet implemented - // in cosmos-proto - Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` - // params defines the zoneconcierge parameters to update. - // - // NOTE: All parameters must be supplied. - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` -} - -func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } -func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateParams) ProtoMessage() {} -func (*MsgUpdateParams) Descriptor() ([]byte, []int) { - return fileDescriptor_35e2112d987e4e18, []int{0} -} -func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateParams.Merge(m, src) -} -func (m *MsgUpdateParams) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateParams) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo - -func (m *MsgUpdateParams) GetAuthority() string { - if m != nil { - return m.Authority - } - return "" -} - -func (m *MsgUpdateParams) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -// MsgUpdateParamsResponse is the response to the MsgUpdateParams message. 
-type MsgUpdateParamsResponse struct { -} - -func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } -func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateParamsResponse) ProtoMessage() {} -func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_35e2112d987e4e18, []int{1} -} -func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) -} -func (m *MsgUpdateParamsResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgUpdateParams)(nil), "babylon.zoneconcierge.v1.MsgUpdateParams") - proto.RegisterType((*MsgUpdateParamsResponse)(nil), "babylon.zoneconcierge.v1.MsgUpdateParamsResponse") -} - -func init() { proto.RegisterFile("babylon/zoneconcierge/v1/tx.proto", fileDescriptor_35e2112d987e4e18) } - -var fileDescriptor_35e2112d987e4e18 = []byte{ - // 333 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0x4a, 0x4c, 0xaa, - 0xcc, 0xc9, 0xcf, 0xd3, 0xaf, 0xca, 0xcf, 0x4b, 0x4d, 0xce, 0xcf, 0x4b, 0xce, 0x4c, 0x2d, 0x4a, - 0x4f, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x80, - 0x2a, 0xd1, 0x43, 0x51, 0xa2, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa4, - 0x0f, 0x62, 0x41, 0xd4, 0x4b, 0x49, 0x26, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0xc7, 0x43, 0x24, 0x20, - 0x1c, 0xa8, 0x94, 0x38, 0x84, 0xa7, 0x9f, 0x5b, 0x9c, 0x0e, 0xb2, 0x22, 0xb7, 0x38, 0x1d, 0x2a, - 0xa1, 0x8a, 0xd3, 0x19, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0xfd, 0x4a, 0x33, 0x19, 0xb9, 0xf8, - 0x7d, 0x8b, 0xd3, 0x43, 0x0b, 0x52, 0x12, 0x4b, 0x52, 0x03, 0xc0, 0x32, 0x42, 0x66, 0x5c, 0x9c, - 0x89, 0xa5, 0x25, 0x19, 0xf9, 0x45, 0x99, 0x25, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x4e, - 0x12, 0x97, 0xb6, 0xe8, 0x8a, 0x40, 0x2d, 0x76, 0x4c, 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x0e, 0x2e, - 0x29, 0xca, 0xcc, 0x4b, 0x0f, 0x42, 0x28, 0x15, 0xb2, 0xe3, 0x62, 0x83, 0x98, 0x2d, 0xc1, 0xa4, - 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, 0xa0, 0x87, 0xcb, 0x9f, 0x7a, 0x10, 0x9b, 0x9c, 0x58, 0x4e, 0xdc, - 0x93, 0x67, 0x08, 0x82, 0xea, 0xb2, 0xe2, 0x6b, 0x7a, 0xbe, 0x41, 0x0b, 0x61, 0x9e, 0x92, 0x24, - 0x97, 0x38, 0x9a, 0xd3, 0x82, 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x8d, 0xaa, 0xb8, 0x98, - 0x7d, 0x8b, 0xd3, 0x85, 0x72, 0xb8, 0x78, 0x50, 0x5c, 0xae, 0x89, 0xdb, 0x46, 0x34, 0x93, 0xa4, - 0x0c, 0x89, 0x56, 0x0a, 0xb3, 0x54, 0x8a, 0xb5, 0xe1, 0xf9, 0x06, 0x2d, 0x46, 0xa7, 0xc0, 0x13, - 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, - 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, - 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x87, 0x9a, 0x9e, 0x93, 0x98, 0x54, 0xac, 0x9b, 0x99, 0x0f, 0xe3, - 0xea, 0x57, 0xa0, 0xc5, 0x47, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 
0x38, 0x32, 0x8c, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x55, 0x48, 0x49, 0xed, 0x3c, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // UpdateParams updates the zoneconcierge module parameters. - UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { - out := new(MsgUpdateParamsResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Msg/UpdateParams", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // UpdateParams updates the zoneconcierge module parameters. - UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateParams) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateParams(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Msg/UpdateParams", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "babylon.zoneconcierge.v1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UpdateParams", - Handler: _Msg_UpdateParams_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "babylon/zoneconcierge/v1/tx.proto", -} - -func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTx(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Authority) > 0 { - i -= len(m.Authority) - copy(dAtA[i:], m.Authority) - i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintTx(dAtA []byte, offset int, v uint64) int { - offset -= sovTx(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgUpdateParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Authority) - if l > 0 { - n += 1 + l + sovTx(uint64(l)) - } - l = m.Params.Size() - n += 1 + l + sovTx(uint64(l)) - return n -} - -func (m *MsgUpdateParamsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovTx(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTx(x uint64) (n int) { - return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTx - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTx - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Authority = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTx - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTx - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - 
iNdEx = preIndex - skippy, err := skipTx(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTx - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTx(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTx - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTx(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTx - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTx - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTx - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTx - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTx - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTx - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") -) diff --git a/x/zoneconcierge/types/types.go b/x/zoneconcierge/types/types.go deleted file mode 100644 index 6795ce58..00000000 --- a/x/zoneconcierge/types/types.go +++ /dev/null @@ -1,19 +0,0 @@ -package types - -import "time" - -// IsLatestHeader checks if a given header is higher than the latest header in chain info -func (ci *ChainInfo) IsLatestHeader(header *IndexedHeader) bool { - if ci.LatestHeader != nil && ci.LatestHeader.Height > header.Height { - return false - } - return true -} - -type HeaderInfo struct { - ClientId string - ChainId string - AppHash []byte - Height uint64 - Time time.Time -} diff --git 
a/x/zoneconcierge/types/zoneconcierge.go b/x/zoneconcierge/types/zoneconcierge.go
deleted file mode 100644
index 91c970eb..00000000
--- a/x/zoneconcierge/types/zoneconcierge.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package types
-
-import (
-	"bytes"
-	"fmt"
-
-	"cosmossdk.io/store/rootmulti"
-	"github.com/cometbft/cometbft/crypto/merkle"
-	cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
-)
-
-// VerifyStore verifies whether a KV pair is committed to the Merkle root, with the assistance of a Merkle proof
-// (adapted from https://github.com/cosmos/cosmos-sdk/blob/v0.46.6/store/rootmulti/proof_test.go)
-func VerifyStore(root []byte, moduleStoreKey string, key []byte, value []byte, proof *cmtcrypto.ProofOps) error {
-	prt := rootmulti.DefaultProofRuntime()
-
-	keypath := merkle.KeyPath{}
-	keypath = keypath.AppendKey([]byte(moduleStoreKey), merkle.KeyEncodingURL)
-	keypath = keypath.AppendKey(key, merkle.KeyEncodingURL)
-	keypathStr := keypath.String()
-
-	// NOTE: the proof can specify verification rules, either only verifying the
-	// top Merkle root w.r.t. all KV pairs, or verifying every layer of Merkle root
-	// TODO: investigate how the verification rules are chosen when generating the
-	// proof
-	if err1 := prt.VerifyValue(proof, root, keypathStr, value); err1 != nil {
-		if err2 := prt.VerifyAbsence(proof, root, keypathStr); err2 != nil {
-			return fmt.Errorf("the Merkle proof does not pass any verification: err of VerifyValue: %w; err of VerifyAbsence: %w", err1, err2)
-		}
-	}
-
-	return nil
-}
-
-func (p *ProofEpochSealed) ValidateBasic() error {
-	if p.ValidatorSet == nil {
-		return ErrInvalidProofEpochSealed.Wrap("ValidatorSet is nil")
-	} else if len(p.ValidatorSet) == 0 {
-		return ErrInvalidProofEpochSealed.Wrap("ValidatorSet is empty")
-	} else if p.ProofEpochInfo == nil {
-		return ErrInvalidProofEpochSealed.Wrap("ProofEpochInfo is nil")
-	} else if p.ProofEpochValSet == nil {
-		return ErrInvalidProofEpochSealed.Wrap("ProofEpochValSet is nil")
-	}
-	return nil
-}
-
-func (ih *IndexedHeader) ValidateBasic() error {
-	if len(ih.ConsumerId) == 0 {
-		return fmt.Errorf("empty ConsumerID")
-	}
-	if len(ih.Hash) == 0 {
-		return fmt.Errorf("empty Hash")
-	}
-	if len(ih.BabylonHeaderHash) == 0 {
-		return fmt.Errorf("empty BabylonHeader hash")
-	}
-	if len(ih.BabylonTxHash) == 0 {
-		return fmt.Errorf("empty BabylonTxHash")
-	}
-	return nil
-}
-
-func (ih *IndexedHeader) Equal(ih2 *IndexedHeader) bool {
-	if ih.ValidateBasic() != nil || ih2.ValidateBasic() != nil {
-		return false
-	}
-
-	if ih.ConsumerId != ih2.ConsumerId {
-		return false
-	}
-	if !bytes.Equal(ih.Hash, ih2.Hash) {
-		return false
-	}
-	if ih.Height != ih2.Height {
-		return false
-	}
-	if !bytes.Equal(ih.BabylonHeaderHash, ih2.BabylonHeaderHash) {
-		return false
-	}
-	if ih.BabylonHeaderHeight != ih2.BabylonHeaderHeight {
-		return false
-	}
-	if ih.BabylonEpoch != ih2.BabylonEpoch {
-		return false
-	}
-	return bytes.Equal(ih.BabylonTxHash, ih2.BabylonTxHash)
-}
-
-func (ci *ChainInfo) Equal(ci2 *ChainInfo) bool {
-	if ci.ValidateBasic() != nil || ci2.ValidateBasic() != nil {
-		return false
-	}
-
-	if ci.ConsumerId != ci2.ConsumerId {
-		return false
-	}
-	if !ci.LatestHeader.Equal(ci2.LatestHeader) {
-		return false
-	}
-	if len(ci.LatestForks.Headers) != len(ci2.LatestForks.Headers) {
-		return false
-	}
-	for i := 0; i < len(ci.LatestForks.Headers); i++ {
-		if !ci.LatestForks.Headers[i].Equal(ci2.LatestForks.Headers[i]) {
-			return false
-		}
-	}
-	return ci.TimestampedHeadersCount == ci2.TimestampedHeadersCount
-}
-
-func (ci *ChainInfo) ValidateBasic() error {
-	if len(ci.ConsumerId) == 0 {
-		return ErrInvalidChainInfo.Wrap("ConsumerId is empty")
-	} else if ci.LatestHeader == nil {
-		return ErrInvalidChainInfo.Wrap("LatestHeader is nil")
-	} else if ci.LatestForks == nil {
-		return ErrInvalidChainInfo.Wrap("LatestForks is nil")
-	}
-	if err := ci.LatestHeader.ValidateBasic(); err != nil {
-		return err
-	}
-	for _, forkHeader := range ci.LatestForks.Headers {
-		if err := forkHeader.ValidateBasic(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func NewBTCTimestampPacketData(btcTimestamp *BTCTimestamp) *ZoneconciergePacketData {
-	return &ZoneconciergePacketData{
-		Packet: &ZoneconciergePacketData_BtcTimestamp{
-			BtcTimestamp: btcTimestamp,
-		},
-	}
-}
diff --git a/x/zoneconcierge/types/zoneconcierge.pb.go b/x/zoneconcierge/types/zoneconcierge.pb.go
deleted file mode 100644
index cd5023d6..00000000
--- a/x/zoneconcierge/types/zoneconcierge.pb.go
+++ /dev/null
@@ -1,2776 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: babylon/zoneconcierge/v1/zoneconcierge.proto
-
-package types
-
-import (
-	fmt "fmt"
-	types2 "github.com/babylonlabs-io/babylon/x/btccheckpoint/types"
-	types3 "github.com/babylonlabs-io/babylon/x/btclightclient/types"
-	types1 "github.com/babylonlabs-io/babylon/x/checkpointing/types"
-	types "github.com/babylonlabs-io/babylon/x/epoching/types"
-	crypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
-	_ "github.com/cosmos/gogoproto/gogoproto"
-	proto "github.com/cosmos/gogoproto/proto"
-	github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types"
-	_ "google.golang.org/protobuf/types/known/timestamppb"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-	time "time"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-var _ = time.Kitchen
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// IndexedHeader is the metadata of a CZ header -type IndexedHeader struct { - // consumer_id is the unique ID of the consumer - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - // hash is the hash of this header - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - // height is the height of this header on CZ ledger - // (hash, height) jointly provides the position of the header on CZ ledger - Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - // time is the timestamp of this header on CZ ledger - // it is needed for CZ to unbond all mature validators/delegations - // before this timestamp when this header is BTC-finalised - Time *time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time,omitempty"` - // babylon_header_hash is the hash of the babylon block that includes this CZ - // header - BabylonHeaderHash []byte `protobuf:"bytes,5,opt,name=babylon_header_hash,json=babylonHeaderHash,proto3" json:"babylon_header_hash,omitempty"` - // babylon_header_height is the height of the babylon block that includes this CZ - // header - BabylonHeaderHeight uint64 `protobuf:"varint,6,opt,name=babylon_header_height,json=babylonHeaderHeight,proto3" json:"babylon_header_height,omitempty"` - // epoch is the epoch number of this header on Babylon ledger - BabylonEpoch uint64 `protobuf:"varint,7,opt,name=babylon_epoch,json=babylonEpoch,proto3" json:"babylon_epoch,omitempty"` - // babylon_tx_hash is the hash of the tx that includes this header - // (babylon_block_height, babylon_tx_hash) jointly provides the position of - // the header on Babylon ledger - BabylonTxHash []byte `protobuf:"bytes,8,opt,name=babylon_tx_hash,json=babylonTxHash,proto3" json:"babylon_tx_hash,omitempty"` -} - -func (m *IndexedHeader) Reset() { *m = IndexedHeader{} } -func (m *IndexedHeader) String() string { return proto.CompactTextString(m) } -func (*IndexedHeader) ProtoMessage() {} -func (*IndexedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{0} -} -func (m *IndexedHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IndexedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IndexedHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IndexedHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexedHeader.Merge(m, src) -} -func (m *IndexedHeader) XXX_Size() int { - return m.Size() -} -func (m *IndexedHeader) XXX_DiscardUnknown() { - xxx_messageInfo_IndexedHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexedHeader proto.InternalMessageInfo - -func (m *IndexedHeader) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *IndexedHeader) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *IndexedHeader) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *IndexedHeader) GetTime() *time.Time { - if m != nil { - return m.Time - } - return nil -} - -func (m *IndexedHeader) GetBabylonHeaderHash() []byte { - if m != nil { - return m.BabylonHeaderHash - } - return nil -} - -func (m *IndexedHeader) GetBabylonHeaderHeight() uint64 { - if m != nil { - return 
m.BabylonHeaderHeight - } - return 0 -} - -func (m *IndexedHeader) GetBabylonEpoch() uint64 { - if m != nil { - return m.BabylonEpoch - } - return 0 -} - -func (m *IndexedHeader) GetBabylonTxHash() []byte { - if m != nil { - return m.BabylonTxHash - } - return nil -} - -// Forks is a list of non-canonical `IndexedHeader`s at the same height. -// For example, assuming the following blockchain -// ``` -// A <- B <- C <- D <- E -// -// \ -- D1 -// \ -- D2 -// -// ``` -// Then the fork will be {[D1, D2]} where each item is in struct `IndexedBlock`. -// -// Note that each `IndexedHeader` in the fork should have a valid quorum -// certificate. Such forks exist since Babylon considers CZs might have -// dishonest majority. Also note that the IBC-Go implementation will only -// consider the first header in a fork valid, since the subsequent headers -// cannot be verified without knowing the validator set in the previous header. -type Forks struct { - // blocks is the list of non-canonical indexed headers at the same height - Headers []*IndexedHeader `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` -} - -func (m *Forks) Reset() { *m = Forks{} } -func (m *Forks) String() string { return proto.CompactTextString(m) } -func (*Forks) ProtoMessage() {} -func (*Forks) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{1} -} -func (m *Forks) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Forks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Forks.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Forks) XXX_Merge(src proto.Message) { - xxx_messageInfo_Forks.Merge(m, src) -} -func (m *Forks) XXX_Size() int { - return m.Size() -} -func (m *Forks) XXX_DiscardUnknown() { - xxx_messageInfo_Forks.DiscardUnknown(m) -} - -var xxx_messageInfo_Forks proto.InternalMessageInfo - -func (m *Forks) GetHeaders() []*IndexedHeader { - if m != nil { - return m.Headers - } - return nil -} - -// ChainInfo is the information of a CZ -type ChainInfo struct { - // consumer_id is the ID of the consumer - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - // latest_header is the latest header in CZ's canonical chain - LatestHeader *IndexedHeader `protobuf:"bytes,2,opt,name=latest_header,json=latestHeader,proto3" json:"latest_header,omitempty"` - // latest_forks is the latest forks, formed as a series of IndexedHeader (from - // low to high) - LatestForks *Forks `protobuf:"bytes,3,opt,name=latest_forks,json=latestForks,proto3" json:"latest_forks,omitempty"` - // timestamped_headers_count is the number of timestamped headers in CZ's - // canonical chain - TimestampedHeadersCount uint64 `protobuf:"varint,4,opt,name=timestamped_headers_count,json=timestampedHeadersCount,proto3" json:"timestamped_headers_count,omitempty"` -} - -func (m *ChainInfo) Reset() { *m = ChainInfo{} } -func (m *ChainInfo) String() string { return proto.CompactTextString(m) } -func (*ChainInfo) ProtoMessage() {} -func (*ChainInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{2} -} -func (m *ChainInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainInfo.Marshal(b, m, deterministic) 
- } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainInfo.Merge(m, src) -} -func (m *ChainInfo) XXX_Size() int { - return m.Size() -} -func (m *ChainInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ChainInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainInfo proto.InternalMessageInfo - -func (m *ChainInfo) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *ChainInfo) GetLatestHeader() *IndexedHeader { - if m != nil { - return m.LatestHeader - } - return nil -} - -func (m *ChainInfo) GetLatestForks() *Forks { - if m != nil { - return m.LatestForks - } - return nil -} - -func (m *ChainInfo) GetTimestampedHeadersCount() uint64 { - if m != nil { - return m.TimestampedHeadersCount - } - return 0 -} - -// ChainInfoWithProof is the chain info with a proof that the latest header in -// the chain info is included in the epoch -type ChainInfoWithProof struct { - ChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=chain_info,json=chainInfo,proto3" json:"chain_info,omitempty"` - // proof_header_in_epoch is an inclusion proof that the latest_header in chain_info - // is committed to `app_hash` of the sealer header of latest_header.babylon_epoch - // this field is optional - ProofHeaderInEpoch *crypto.ProofOps `protobuf:"bytes,2,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` -} - -func (m *ChainInfoWithProof) Reset() { *m = ChainInfoWithProof{} } -func (m *ChainInfoWithProof) String() string { return proto.CompactTextString(m) } -func (*ChainInfoWithProof) ProtoMessage() {} -func (*ChainInfoWithProof) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{3} -} -func (m *ChainInfoWithProof) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChainInfoWithProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChainInfoWithProof.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChainInfoWithProof) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainInfoWithProof.Merge(m, src) -} -func (m *ChainInfoWithProof) XXX_Size() int { - return m.Size() -} -func (m *ChainInfoWithProof) XXX_DiscardUnknown() { - xxx_messageInfo_ChainInfoWithProof.DiscardUnknown(m) -} - -var xxx_messageInfo_ChainInfoWithProof proto.InternalMessageInfo - -func (m *ChainInfoWithProof) GetChainInfo() *ChainInfo { - if m != nil { - return m.ChainInfo - } - return nil -} - -func (m *ChainInfoWithProof) GetProofHeaderInEpoch() *crypto.ProofOps { - if m != nil { - return m.ProofHeaderInEpoch - } - return nil -} - -// FinalizedChainInfo is the information of a CZ that is BTC-finalised -type FinalizedChainInfo struct { - // consumer_id is the ID of the consumer - ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - // finalized_chain_info is the info of the CZ - FinalizedChainInfo *ChainInfo `protobuf:"bytes,2,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` - // epoch_info is the metadata of the last BTC-finalised epoch - EpochInfo *types.Epoch `protobuf:"bytes,3,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` - // raw_checkpoint is the raw 
checkpoint of this epoch - RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,4,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` - // btc_submission_key is position of two BTC txs that include the raw - // checkpoint of this epoch - BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,5,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // proof is the proof that the chain info is finalized - Proof *ProofFinalizedChainInfo `protobuf:"bytes,6,opt,name=proof,proto3" json:"proof,omitempty"` -} - -func (m *FinalizedChainInfo) Reset() { *m = FinalizedChainInfo{} } -func (m *FinalizedChainInfo) String() string { return proto.CompactTextString(m) } -func (*FinalizedChainInfo) ProtoMessage() {} -func (*FinalizedChainInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{4} -} -func (m *FinalizedChainInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FinalizedChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FinalizedChainInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FinalizedChainInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_FinalizedChainInfo.Merge(m, src) -} -func (m *FinalizedChainInfo) XXX_Size() int { - return m.Size() -} -func (m *FinalizedChainInfo) XXX_DiscardUnknown() { - xxx_messageInfo_FinalizedChainInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_FinalizedChainInfo proto.InternalMessageInfo - -func (m *FinalizedChainInfo) GetConsumerId() string { - if m != nil { - return m.ConsumerId - } - return "" -} - -func (m *FinalizedChainInfo) GetFinalizedChainInfo() *ChainInfo { - if m != nil { - return m.FinalizedChainInfo - } - return nil -} - -func (m *FinalizedChainInfo) GetEpochInfo() *types.Epoch { - if m != nil { - return m.EpochInfo - } - return nil -} - -func (m *FinalizedChainInfo) GetRawCheckpoint() *types1.RawCheckpoint { - if m != nil { - return m.RawCheckpoint - } - return nil -} - -func (m *FinalizedChainInfo) GetBtcSubmissionKey() *types2.SubmissionKey { - if m != nil { - return m.BtcSubmissionKey - } - return nil -} - -func (m *FinalizedChainInfo) GetProof() *ProofFinalizedChainInfo { - if m != nil { - return m.Proof - } - return nil -} - -// ProofEpochSealed is the proof that an epoch is sealed by the sealer header, -// i.e., the 2nd header of the next epoch With the access of metadata -// - Metadata of this epoch, which includes the sealer header -// - Raw checkpoint of this epoch -// The verifier can perform the following verification rules: -// - The raw checkpoint's `app_hash` is same as in the sealer header -// - More than 2/3 (in voting power) validators in the validator set of this -// epoch have signed `app_hash` of the sealer header -// - The epoch metadata is committed to the `app_hash` of the sealer header -// - The validator set is committed to the `app_hash` of the sealer header -type ProofEpochSealed struct { - // validator_set is the validator set of the sealed epoch - // This validator set has generated a BLS multisig on `app_hash` of - // the sealer header - ValidatorSet []*types1.ValidatorWithBlsKey `protobuf:"bytes,1,rep,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` - // proof_epoch_info is the Merkle proof that the epoch's metadata is committed - // to `app_hash` of the sealer header - 
ProofEpochInfo *crypto.ProofOps `protobuf:"bytes,2,opt,name=proof_epoch_info,json=proofEpochInfo,proto3" json:"proof_epoch_info,omitempty"` - // proof_epoch_info is the Merkle proof that the epoch's validator set is - // committed to `app_hash` of the sealer header - ProofEpochValSet *crypto.ProofOps `protobuf:"bytes,3,opt,name=proof_epoch_val_set,json=proofEpochValSet,proto3" json:"proof_epoch_val_set,omitempty"` -} - -func (m *ProofEpochSealed) Reset() { *m = ProofEpochSealed{} } -func (m *ProofEpochSealed) String() string { return proto.CompactTextString(m) } -func (*ProofEpochSealed) ProtoMessage() {} -func (*ProofEpochSealed) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{5} -} -func (m *ProofEpochSealed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProofEpochSealed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProofEpochSealed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProofEpochSealed) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProofEpochSealed.Merge(m, src) -} -func (m *ProofEpochSealed) XXX_Size() int { - return m.Size() -} -func (m *ProofEpochSealed) XXX_DiscardUnknown() { - xxx_messageInfo_ProofEpochSealed.DiscardUnknown(m) -} - -var xxx_messageInfo_ProofEpochSealed proto.InternalMessageInfo - -func (m *ProofEpochSealed) GetValidatorSet() []*types1.ValidatorWithBlsKey { - if m != nil { - return m.ValidatorSet - } - return nil -} - -func (m *ProofEpochSealed) GetProofEpochInfo() *crypto.ProofOps { - if m != nil { - return m.ProofEpochInfo - } - return nil -} - -func (m *ProofEpochSealed) GetProofEpochValSet() *crypto.ProofOps { - if m != nil { - return m.ProofEpochValSet - } - return nil -} - -// ProofFinalizedChainInfo is a set of proofs that attest a chain info is -// BTC-finalised -type ProofFinalizedChainInfo struct { - // proof_cz_header_in_epoch is the proof that the CZ header is timestamped - // within a certain epoch - ProofCzHeaderInEpoch *crypto.ProofOps `protobuf:"bytes,1,opt,name=proof_cz_header_in_epoch,json=proofCzHeaderInEpoch,proto3" json:"proof_cz_header_in_epoch,omitempty"` - // proof_epoch_sealed is the proof that the epoch is sealed - ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,2,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` - // proof_epoch_submitted is the proof that the epoch's checkpoint is included - // in BTC ledger It is the two TransactionInfo in the best (i.e., earliest) - // checkpoint submission - ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,3,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` -} - -func (m *ProofFinalizedChainInfo) Reset() { *m = ProofFinalizedChainInfo{} } -func (m *ProofFinalizedChainInfo) String() string { return proto.CompactTextString(m) } -func (*ProofFinalizedChainInfo) ProtoMessage() {} -func (*ProofFinalizedChainInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{6} -} -func (m *ProofFinalizedChainInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProofFinalizedChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProofFinalizedChainInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - 
if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProofFinalizedChainInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProofFinalizedChainInfo.Merge(m, src) -} -func (m *ProofFinalizedChainInfo) XXX_Size() int { - return m.Size() -} -func (m *ProofFinalizedChainInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProofFinalizedChainInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ProofFinalizedChainInfo proto.InternalMessageInfo - -func (m *ProofFinalizedChainInfo) GetProofCzHeaderInEpoch() *crypto.ProofOps { - if m != nil { - return m.ProofCzHeaderInEpoch - } - return nil -} - -func (m *ProofFinalizedChainInfo) GetProofEpochSealed() *ProofEpochSealed { - if m != nil { - return m.ProofEpochSealed - } - return nil -} - -func (m *ProofFinalizedChainInfo) GetProofEpochSubmitted() []*types2.TransactionInfo { - if m != nil { - return m.ProofEpochSubmitted - } - return nil -} - -// Btc light client chain segment grown during last finalized epoch -type BTCChainSegment struct { - BtcHeaders []*types3.BTCHeaderInfo `protobuf:"bytes,1,rep,name=btc_headers,json=btcHeaders,proto3" json:"btc_headers,omitempty"` -} - -func (m *BTCChainSegment) Reset() { *m = BTCChainSegment{} } -func (m *BTCChainSegment) String() string { return proto.CompactTextString(m) } -func (*BTCChainSegment) ProtoMessage() {} -func (*BTCChainSegment) Descriptor() ([]byte, []int) { - return fileDescriptor_ab886e1868e5c5cd, []int{7} -} -func (m *BTCChainSegment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BTCChainSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BTCChainSegment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BTCChainSegment) XXX_Merge(src proto.Message) { - xxx_messageInfo_BTCChainSegment.Merge(m, src) -} -func (m *BTCChainSegment) XXX_Size() int { - return m.Size() -} -func (m *BTCChainSegment) XXX_DiscardUnknown() { - xxx_messageInfo_BTCChainSegment.DiscardUnknown(m) -} - -var xxx_messageInfo_BTCChainSegment proto.InternalMessageInfo - -func (m *BTCChainSegment) GetBtcHeaders() []*types3.BTCHeaderInfo { - if m != nil { - return m.BtcHeaders - } - return nil -} - -func init() { - proto.RegisterType((*IndexedHeader)(nil), "babylon.zoneconcierge.v1.IndexedHeader") - proto.RegisterType((*Forks)(nil), "babylon.zoneconcierge.v1.Forks") - proto.RegisterType((*ChainInfo)(nil), "babylon.zoneconcierge.v1.ChainInfo") - proto.RegisterType((*ChainInfoWithProof)(nil), "babylon.zoneconcierge.v1.ChainInfoWithProof") - proto.RegisterType((*FinalizedChainInfo)(nil), "babylon.zoneconcierge.v1.FinalizedChainInfo") - proto.RegisterType((*ProofEpochSealed)(nil), "babylon.zoneconcierge.v1.ProofEpochSealed") - proto.RegisterType((*ProofFinalizedChainInfo)(nil), "babylon.zoneconcierge.v1.ProofFinalizedChainInfo") - proto.RegisterType((*BTCChainSegment)(nil), "babylon.zoneconcierge.v1.BTCChainSegment") -} - -func init() { - proto.RegisterFile("babylon/zoneconcierge/v1/zoneconcierge.proto", fileDescriptor_ab886e1868e5c5cd) -} - -var fileDescriptor_ab886e1868e5c5cd = []byte{ - // 974 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x5f, 0x6f, 0x1b, 0x45, - 0x10, 0xcf, 0xc5, 0x4e, 0x4a, 0xc6, 0x71, 0x1b, 0x36, 0x29, 0x35, 0x41, 0x38, 0x96, 0x2b, 0x15, - 0x17, 0xd1, 0xb3, 0x6c, 0x90, 0x10, 0xbc, 0x61, 0xab, 0xa5, 0x2e, 0x88, 0x3f, 0x67, 0xb7, 
0x20, - 0x04, 0x3a, 0xdd, 0x9f, 0xb5, 0xef, 0x94, 0xf3, 0xad, 0x75, 0xbb, 0x76, 0xe3, 0x7c, 0x8a, 0x7e, - 0x0b, 0xf8, 0x06, 0x7c, 0x00, 0x5e, 0x78, 0xec, 0x23, 0x6f, 0xa0, 0xe4, 0x53, 0xc0, 0x13, 0xda, - 0xd9, 0xdd, 0xf3, 0xb9, 0x91, 0x95, 0xf6, 0x25, 0xba, 0x9d, 0xfd, 0xcd, 0xcc, 0x6f, 0xe6, 0x37, - 0xb3, 0x0e, 0x7c, 0xe4, 0x7b, 0xfe, 0x32, 0x61, 0x69, 0xfb, 0x9c, 0xa5, 0x34, 0x60, 0x69, 0x10, - 0xd3, 0x6c, 0x42, 0xdb, 0x8b, 0xce, 0xba, 0xc1, 0x9e, 0x65, 0x4c, 0x30, 0x52, 0xd3, 0x68, 0x7b, - 0xfd, 0x72, 0xd1, 0x39, 0x3e, 0x9a, 0xb0, 0x09, 0x43, 0x50, 0x5b, 0x7e, 0x29, 0xfc, 0xf1, 0xc9, - 0x84, 0xb1, 0x49, 0x42, 0xdb, 0x78, 0xf2, 0xe7, 0xe3, 0xb6, 0x88, 0xa7, 0x94, 0x0b, 0x6f, 0x3a, - 0xd3, 0x80, 0xf7, 0x05, 0x4d, 0x43, 0x9a, 0x4d, 0xe3, 0x54, 0xb4, 0x83, 0x6c, 0x39, 0x13, 0x4c, - 0x62, 0xd9, 0x58, 0x5f, 0xe7, 0xec, 0x7c, 0x11, 0x04, 0x11, 0x0d, 0x4e, 0x67, 0x4c, 0x22, 0x17, - 0x9d, 0x75, 0x83, 0x46, 0xdf, 0x33, 0xe8, 0xd5, 0x4d, 0x9c, 0x4e, 0x10, 0x9d, 0x70, 0xf7, 0x94, - 0x2e, 0x35, 0xee, 0xfe, 0x46, 0xdc, 0x95, 0x90, 0x4d, 0x03, 0xa5, 0x33, 0x16, 0x44, 0x1a, 0x65, - 0xbe, 0x35, 0xc6, 0x2e, 0x90, 0x4c, 0xe2, 0x49, 0x24, 0xff, 0xd2, 0x9c, 0x65, 0xc1, 0xa2, 0xf0, - 0xcd, 0x3f, 0xb6, 0xa1, 0x3a, 0x48, 0x43, 0x7a, 0x46, 0xc3, 0xc7, 0xd4, 0x0b, 0x69, 0x46, 0x4e, - 0xa0, 0x12, 0xb0, 0x94, 0xcf, 0xa7, 0x34, 0x73, 0xe3, 0xb0, 0x66, 0x35, 0xac, 0xd6, 0x9e, 0x03, - 0xc6, 0x34, 0x08, 0x09, 0x81, 0x72, 0xe4, 0xf1, 0xa8, 0xb6, 0xdd, 0xb0, 0x5a, 0xfb, 0x0e, 0x7e, - 0x93, 0x77, 0x60, 0x37, 0xa2, 0x32, 0x78, 0xad, 0xd4, 0xb0, 0x5a, 0x65, 0x47, 0x9f, 0xc8, 0x27, - 0x50, 0x96, 0x5d, 0xae, 0x95, 0x1b, 0x56, 0xab, 0xd2, 0x3d, 0xb6, 0x95, 0x04, 0xb6, 0x91, 0xc0, - 0x1e, 0x19, 0x09, 0x7a, 0xe5, 0x17, 0x7f, 0x9f, 0x58, 0x0e, 0xa2, 0x89, 0x0d, 0x87, 0xba, 0x0c, - 0x37, 0x42, 0x52, 0x2e, 0x26, 0xdc, 0xc1, 0x84, 0x6f, 0xeb, 0x2b, 0x45, 0xf7, 0xb1, 0xcc, 0xde, - 0x85, 0xdb, 0xaf, 0xe2, 0x15, 0x99, 0x5d, 0x24, 0x73, 0xb8, 0xee, 0xa1, 0x98, 0xdd, 0x85, 0xaa, - 0xf1, 0xc1, 0x16, 0xd6, 0x6e, 0x20, 0x76, 0x5f, 0x1b, 0x1f, 0x4a, 0x1b, 0xb9, 0x07, 0xb7, 0x0c, - 0x48, 0x9c, 0x29, 0x12, 0x6f, 0x21, 0x09, 0xe3, 0x3b, 0x3a, 0x93, 0x04, 0x9a, 0x4f, 0x60, 0xe7, - 0x11, 0xcb, 0x4e, 0x39, 0xf9, 0x02, 0x6e, 0x28, 0x06, 0xbc, 0x56, 0x6a, 0x94, 0x5a, 0x95, 0xee, - 0x07, 0xf6, 0xa6, 0x29, 0xb5, 0xd7, 0xda, 0xee, 0x18, 0xbf, 0xe6, 0x7f, 0x16, 0xec, 0xf5, 0x23, - 0x2f, 0x4e, 0x07, 0xe9, 0x98, 0x5d, 0xaf, 0xc6, 0xd7, 0x50, 0x4d, 0x3c, 0x41, 0xb9, 0xd0, 0xa5, - 0xa3, 0x2c, 0x6f, 0x90, 0x77, 0x5f, 0x79, 0x6b, 0xf1, 0x7b, 0xa0, 0xcf, 0xee, 0x58, 0xd6, 0x83, - 0x6a, 0x56, 0xba, 0x27, 0x9b, 0x83, 0x61, 0xd9, 0x4e, 0x45, 0x39, 0xa9, 0x1e, 0x7c, 0x0e, 0xef, - 0xe6, 0x9b, 0x45, 0x43, 0x4d, 0x8b, 0xbb, 0x01, 0x9b, 0xa7, 0x02, 0x07, 0xa1, 0xec, 0xdc, 0x29, - 0x00, 0x54, 0x66, 0xde, 0x97, 0xd7, 0xcd, 0xdf, 0x2c, 0x20, 0x79, 0xf1, 0x3f, 0xc4, 0x22, 0xfa, - 0x4e, 0x2e, 0x20, 0xe9, 0x01, 0x04, 0xd2, 0xea, 0xc6, 0xe9, 0x98, 0x61, 0x13, 0x2a, 0xdd, 0xbb, - 0x9b, 0x49, 0xe5, 0x11, 0x9c, 0xbd, 0x20, 0xef, 0xe4, 0x37, 0x70, 0x1b, 0xb7, 0xd9, 0x8c, 0x48, - 0x6c, 0x84, 0x57, 0x0d, 0x7b, 0xcf, 0x5e, 0x6d, 0xbf, 0xad, 0xb6, 0xdf, 0xc6, 0xe4, 0xdf, 0xce, - 0xb8, 0x43, 0xd0, 0x53, 0x31, 0x1d, 0xa8, 0xd9, 0x68, 0xfe, 0x5e, 0x02, 0xf2, 0x28, 0x4e, 0xbd, - 0x24, 0x3e, 0xa7, 0xe1, 0x1b, 0x08, 0xf6, 0x14, 0x8e, 0xc6, 0xc6, 0xcd, 0x2d, 0x54, 0xb5, 0xfd, - 0xfa, 0x55, 0x91, 0xf1, 0xd5, 0xbc, 0x9f, 0x01, 0x60, 0x39, 0x2a, 0x58, 0x49, 0xef, 0x9b, 0x09, - 0x96, 0xbf, 0x12, 0x8b, 0x8e, 0x8d, 0xf4, 0x9d, 0x3d, 0x34, 0xe9, 0xce, 0xdc, 0xcc, 0xbc, 0xe7, - 0xee, 0xea, 0xbd, 
0xd1, 0xeb, 0xba, 0x9a, 0xa1, 0xb5, 0xb7, 0x49, 0xc6, 0x70, 0xbc, 0xe7, 0xfd, - 0xdc, 0xe6, 0x54, 0xb3, 0xe2, 0x91, 0x3c, 0x05, 0xe2, 0x8b, 0xc0, 0xe5, 0x73, 0x7f, 0x1a, 0x73, - 0x1e, 0xb3, 0x54, 0x3e, 0x77, 0xb8, 0xbd, 0xc5, 0x98, 0xeb, 0x8f, 0xe6, 0xa2, 0x63, 0x0f, 0x73, - 0xfc, 0x57, 0x74, 0xe9, 0x1c, 0xf8, 0x22, 0x58, 0xb3, 0x90, 0x2f, 0x61, 0x07, 0x65, 0xc0, 0xad, - 0xae, 0x74, 0x3b, 0x9b, 0x3b, 0x85, 0xba, 0x5d, 0xd5, 0xc6, 0x51, 0xfe, 0xcd, 0x7f, 0x2d, 0x38, - 0x40, 0x08, 0x76, 0x62, 0x48, 0xbd, 0x84, 0x86, 0xc4, 0x81, 0xea, 0xc2, 0x4b, 0xe2, 0xd0, 0x13, - 0x2c, 0x73, 0x39, 0x15, 0x35, 0x0b, 0xf7, 0xf7, 0xc1, 0xe6, 0x1e, 0x3c, 0x33, 0x70, 0x39, 0xa7, - 0xbd, 0x84, 0x4b, 0xd6, 0xfb, 0x79, 0x8c, 0x21, 0x15, 0xe4, 0x21, 0x1c, 0xa8, 0x91, 0x2b, 0x28, - 0xf3, 0x1a, 0xd3, 0x76, 0x73, 0x96, 0x93, 0x43, 0x7d, 0x9e, 0xc0, 0x61, 0x31, 0xcc, 0xc2, 0x4b, - 0x90, 0x60, 0xe9, 0xfa, 0x48, 0x07, 0xab, 0x48, 0xcf, 0xbc, 0x64, 0x48, 0x45, 0xf3, 0xd7, 0x6d, - 0xb8, 0xb3, 0xa1, 0x3d, 0x64, 0x08, 0x35, 0x95, 0x27, 0x38, 0xbf, 0xb2, 0x24, 0xd6, 0xf5, 0xc9, - 0x8e, 0xd0, 0xb9, 0x7f, 0xbe, 0xb6, 0x26, 0xe4, 0x47, 0x20, 0x45, 0xf2, 0x1c, 0xbb, 0xad, 0xbb, - 0xf0, 0xe1, 0x35, 0x12, 0x16, 0xf4, 0x29, 0x96, 0xa2, 0x15, 0xfb, 0xc5, 0x2c, 0xb4, 0x8e, 0x2c, - 0x87, 0x45, 0x08, 0x1a, 0xea, 0x97, 0xf7, 0xfe, 0xe6, 0x49, 0x1b, 0x65, 0x5e, 0xca, 0xbd, 0x40, - 0xc4, 0x4c, 0xcd, 0xc5, 0x61, 0x21, 0xb6, 0x89, 0xd2, 0xfc, 0x19, 0x6e, 0xf5, 0x46, 0x7d, 0xec, - 0xce, 0x90, 0x4e, 0xa6, 0x34, 0x15, 0x64, 0x00, 0x15, 0x39, 0xd8, 0xe6, 0x85, 0x57, 0x13, 0xd2, - 0x2a, 0xe6, 0x29, 0xfe, 0xc0, 0x2e, 0x3a, 0x76, 0x6f, 0xd4, 0x37, 0xdd, 0x18, 0x33, 0x07, 0x7c, - 0x11, 0xe8, 0xd7, 0xae, 0xf7, 0xfd, 0x9f, 0x17, 0x75, 0xeb, 0xe5, 0x45, 0xdd, 0xfa, 0xe7, 0xa2, - 0x6e, 0xbd, 0xb8, 0xac, 0x6f, 0xbd, 0xbc, 0xac, 0x6f, 0xfd, 0x75, 0x59, 0xdf, 0xfa, 0xe9, 0xd3, - 0x49, 0x2c, 0xa2, 0xb9, 0x6f, 0x07, 0x6c, 0xda, 0xd6, 0x91, 0x13, 0xcf, 0xe7, 0x0f, 0x62, 0x66, - 0x8e, 0xed, 0xb3, 0x57, 0xfe, 0x41, 0x12, 0xcb, 0x19, 0xe5, 0xfe, 0x2e, 0xfe, 0xaa, 0x7e, 0xfc, - 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xc7, 0xfb, 0xb5, 0x46, 0x09, 0x00, 0x00, -} - -func (m *IndexedHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IndexedHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IndexedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.BabylonTxHash) > 0 { - i -= len(m.BabylonTxHash) - copy(dAtA[i:], m.BabylonTxHash) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.BabylonTxHash))) - i-- - dAtA[i] = 0x42 - } - if m.BabylonEpoch != 0 { - i = encodeVarintZoneconcierge(dAtA, i, uint64(m.BabylonEpoch)) - i-- - dAtA[i] = 0x38 - } - if m.BabylonHeaderHeight != 0 { - i = encodeVarintZoneconcierge(dAtA, i, uint64(m.BabylonHeaderHeight)) - i-- - dAtA[i] = 0x30 - } - if len(m.BabylonHeaderHash) > 0 { - i -= len(m.BabylonHeaderHash) - copy(dAtA[i:], m.BabylonHeaderHash) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.BabylonHeaderHash))) - i-- - dAtA[i] = 0x2a - } - if m.Time != nil { - n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(*m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.Time):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintZoneconcierge(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - } - if 
m.Height != 0 { - i = encodeVarintZoneconcierge(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 - } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Forks) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Forks) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Forks) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - return len(dAtA) - i, nil -} - -func (m *ChainInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TimestampedHeadersCount != 0 { - i = encodeVarintZoneconcierge(dAtA, i, uint64(m.TimestampedHeadersCount)) - i-- - dAtA[i] = 0x20 - } - if m.LatestForks != nil { - { - size, err := m.LatestForks.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.LatestHeader != nil { - { - size, err := m.LatestHeader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChainInfoWithProof) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChainInfoWithProof) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChainInfoWithProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ProofHeaderInEpoch != nil { - { - size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ChainInfo != nil { - { - size, err := m.ChainInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FinalizedChainInfo) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FinalizedChainInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FinalizedChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.BtcSubmissionKey != nil { - { - size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.RawCheckpoint != nil { - { - size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.EpochInfo != nil { - { - size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.FinalizedChainInfo != nil { - { - size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ConsumerId) > 0 { - i -= len(m.ConsumerId) - copy(dAtA[i:], m.ConsumerId) - i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProofEpochSealed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProofEpochSealed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProofEpochSealed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ProofEpochValSet != nil { - { - size, err := m.ProofEpochValSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.ProofEpochInfo != nil { - { - size, err := m.ProofEpochInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ValidatorSet) > 0 { - for iNdEx := len(m.ValidatorSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ProofFinalizedChainInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProofFinalizedChainInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProofFinalizedChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProofEpochSubmitted) > 0 { - for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.ProofEpochSealed != nil { - { - size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ProofCzHeaderInEpoch != nil { - { - size, err := m.ProofCzHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BTCChainSegment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BTCChainSegment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BTCChainSegment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.BtcHeaders) > 0 { - for iNdEx := len(m.BtcHeaders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.BtcHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintZoneconcierge(dAtA []byte, offset int, v uint64) int { - offset -= sovZoneconcierge(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *IndexedHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovZoneconcierge(uint64(m.Height)) - } - if m.Time != nil { - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.Time) - n += 1 + l + sovZoneconcierge(uint64(l)) - } - l = len(m.BabylonHeaderHash) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.BabylonHeaderHeight != 0 { - n += 1 + sovZoneconcierge(uint64(m.BabylonHeaderHeight)) - } - if m.BabylonEpoch != 0 { - n += 1 + sovZoneconcierge(uint64(m.BabylonEpoch)) - } - l = len(m.BabylonTxHash) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - return n -} - -func (m *Forks) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - } - return n -} - -func (m *ChainInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.LatestHeader != nil { - l = m.LatestHeader.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.LatestForks != nil { - l = m.LatestForks.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.TimestampedHeadersCount != 0 { - n += 1 + sovZoneconcierge(uint64(m.TimestampedHeadersCount)) - } - return n -} - -func (m *ChainInfoWithProof) Size() (n 
int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ChainInfo != nil { - l = m.ChainInfo.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.ProofHeaderInEpoch != nil { - l = m.ProofHeaderInEpoch.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - return n -} - -func (m *FinalizedChainInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerId) - if l > 0 { - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.FinalizedChainInfo != nil { - l = m.FinalizedChainInfo.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.EpochInfo != nil { - l = m.EpochInfo.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.BtcSubmissionKey != nil { - l = m.BtcSubmissionKey.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - return n -} - -func (m *ProofEpochSealed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ValidatorSet) > 0 { - for _, e := range m.ValidatorSet { - l = e.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - } - if m.ProofEpochInfo != nil { - l = m.ProofEpochInfo.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.ProofEpochValSet != nil { - l = m.ProofEpochValSet.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - return n -} - -func (m *ProofFinalizedChainInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ProofCzHeaderInEpoch != nil { - l = m.ProofCzHeaderInEpoch.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if m.ProofEpochSealed != nil { - l = m.ProofEpochSealed.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - if len(m.ProofEpochSubmitted) > 0 { - for _, e := range m.ProofEpochSubmitted { - l = e.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - } - return n -} - -func (m *BTCChainSegment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.BtcHeaders) > 0 { - for _, e := range m.BtcHeaders { - l = e.Size() - n += 1 + l + sovZoneconcierge(uint64(l)) - } - } - return n -} - -func sovZoneconcierge(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozZoneconcierge(x uint64) (n int) { - return sovZoneconcierge(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *IndexedHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IndexedHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IndexedHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsumerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Time == nil { - m.Time = new(time.Time) - } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BabylonHeaderHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BabylonHeaderHash = append(m.BabylonHeaderHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.BabylonHeaderHash == nil { - m.BabylonHeaderHash = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BabylonHeaderHeight", wireType) - } - m.BabylonHeaderHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BabylonHeaderHeight |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BabylonEpoch", wireType) - } - m.BabylonEpoch = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BabylonEpoch |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BabylonTxHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BabylonTxHash = append(m.BabylonTxHash[:0], dAtA[iNdEx:postIndex]...) - if m.BabylonTxHash == nil { - m.BabylonTxHash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipZoneconcierge(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthZoneconcierge - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Forks) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Forks: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Forks: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowZoneconcierge - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthZoneconcierge - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthZoneconcierge - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &IndexedHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipZoneconcierge(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 
[remainder of the deleted generated gogoproto code for the zoneconcierge module, flattened in extraction: the tail of the preceding Unmarshal method, the Unmarshal implementations for ChainInfo, ChainInfoWithProof, FinalizedChainInfo, ProofEpochSealed, ProofFinalizedChainInfo, and BTCChainSegment, the skipZoneconcierge varint-skipping helper, and the ErrInvalidLengthZoneconcierge / ErrIntOverflowZoneconcierge / ErrUnexpectedEndOfGroupZoneconcierge sentinel errors]