diff --git a/proto/babylon/btccheckpoint/btccheckpoint.proto b/proto/babylon/btccheckpoint/btccheckpoint.proto new file mode 100644 index 000000000..3a9e3b014 --- /dev/null +++ b/proto/babylon/btccheckpoint/btccheckpoint.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package babylon.btccheckpoint; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/babylonchain/babylon/x/btccheckpoint/types"; + +// Each provided OP_RETURN transaction can be idendtified by hash of block in +// which transaction was included and transaction index in the block +message TransactionKey { + uint32 index = 1; + bytes hash = 2 [ + (gogoproto.customtype) = "github.com/babylonchain/babylon/types.BTCHeaderHashBytes" + ]; +} + +// Checkpoint can be composed from multiple transactions, so to identify whole +// submission we need list of transaction keys. +// Each submission can generally be identified by this list of (txIdx, blockHash) +// tuples. +// Note: this could possibly be optimized as if transactions were in one block +// they would have the same block hash and different indexes, but each blockhash +// is only 33 (1 byte for prefix encoding and 32 byte hash), so there should +// be other strong arguments for this optimization +message SubmissionKey { + repeated TransactionKey key = 1; +} + +// TODO: Determine if we should keep any block number or depth info. +// On one hand it may be usefull to determine if block is stable or not, on other +// depth/block number info, without context (i.e info about chain) is pretty useless +// and blockshash in enough to retrieve is from lightclient +message SubmissionData { + // TODO: this could probably be better typed + // Address of submitter of given checkpoint. Required to payup the reward to + // submitter of given checkpoint + bytes submitter = 1; + // Required to recover address of sender of btc transction to payup the reward. 
+ // TODO: Maybe it is worth recovering senders while processing the InsertProof + // message, and store only those. Another point is that it is not that simple + // to recover sender of btc tx. + repeated bytes btctransaction = 2; +} + +// Data stored in db and indexed by epoch number +// TODO: Add btc blockheight at epooch end, when adding hadnling of epoching callbacks +message EpochData { + // List of all received checkpoints during this epoch, sorted by order of + // submission. + repeated SubmissionKey key = 1; +} + diff --git a/x/btccheckpoint/btcutils/btcutils.go b/x/btccheckpoint/btcutils/btcutils.go index 16624b03a..02d165d87 100644 --- a/x/btccheckpoint/btcutils/btcutils.go +++ b/x/btccheckpoint/btcutils/btcutils.go @@ -10,7 +10,6 @@ import ( "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" ) const ( @@ -20,12 +19,17 @@ const ( // Parsed proof represent semantically valid: // - Bitcoin Header +// - Bitcoin Header hash // - Bitcoin Transaction +// - Bitcoin Transaction index in block // - Non-empty OpReturnData type ParsedProof struct { - BlockHeader wire.BlockHeader - Transaction *btcutil.Tx - OpReturnData []byte + // keeping header hash to avoid recomputing it everytime + BlockHash types.BTCHeaderHashBytes + Transaction *btcutil.Tx + TransactionBytes []byte + TransactionIdx uint32 + OpReturnData []byte } // Concatenates and double hashes two provided inputs @@ -147,10 +151,13 @@ func ParseProof( return nil, fmt.Errorf("provided transaction should provide op return data") } + bh := header.BlockHash() parsedProof := &ParsedProof{ - BlockHeader: *header, - Transaction: tx, - OpReturnData: opReturnData, + BlockHash: types.NewBTCHeaderHashBytesFromChainhash(&bh), + Transaction: tx, + TransactionBytes: btcTransaction, + TransactionIdx: transactionIndex, + OpReturnData: opReturnData, } return parsedProof, nil diff --git a/x/btccheckpoint/keeper/keeper.go 
b/x/btccheckpoint/keeper/keeper.go index d99381bbf..31599bb0f 100644 --- a/x/btccheckpoint/keeper/keeper.go +++ b/x/btccheckpoint/keeper/keeper.go @@ -3,9 +3,9 @@ package keeper import ( "fmt" - "github.com/btcsuite/btcd/wire" "github.com/tendermint/tendermint/libs/log" + btypes "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/btccheckpoint/types" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" @@ -50,26 +50,49 @@ func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) } -func (k Keeper) GetBlockHeight(b wire.BlockHeader) (uint64, error) { +func (k Keeper) GetBlockHeight(b btypes.BTCHeaderHashBytes) (uint64, error) { return k.btcLightClientKeeper.BlockHeight(b) } +func (k Keeper) IsAncestor(parentHash btypes.BTCHeaderHashBytes, childHash btypes.BTCHeaderHashBytes) (bool, error) { + return k.btcLightClientKeeper.IsAncestor(parentHash, childHash) +} + func (k Keeper) GetCheckpointEpoch(c []byte) (uint64, error) { return k.checkpointingKeeper.CheckpointEpoch(c) } -// TODO for now we jsut store raw checkpoint with epoch as key. 
Ultimatly -// we should store checkpoint with some timestamp info, or even do not store -// checkpoint itelf but its status -func (k Keeper) StoreCheckpoint(ctx sdk.Context, e uint64, c []byte) { +func (k Keeper) SubmissionExists(ctx sdk.Context, sk types.SubmissionKey) bool { + store := ctx.KVStore(k.storeKey) + kBytes := k.cdc.MustMarshal(&sk) + return store.Has(kBytes) +} + +// Return epoch data for given epoch, if there is not epoch data yet returns nil +func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { + store := ctx.KVStore(k.storeKey) + bytes := store.Get(types.GetEpochIndexKey(e)) + + if len(bytes) == 0 { + return nil + } + + ed := &types.EpochData{} + k.cdc.MustUnmarshal(bytes, ed) + return ed + +} + +func (k Keeper) SaveEpochData(ctx sdk.Context, e uint64, ed *types.EpochData) { store := ctx.KVStore(k.storeKey) - key := sdk.Uint64ToBigEndian(e) - store.Set(key, c) + ek := types.GetEpochIndexKey(e) + eb := k.cdc.MustMarshal(ed) + store.Set(ek, eb) } -// TODO just return checkpoint if exists -func (k Keeper) GetCheckpoint(ctx sdk.Context, e uint64) []byte { +func (k Keeper) SaveSubmission(ctx sdk.Context, sk types.SubmissionKey, sd types.SubmissionData) { store := ctx.KVStore(k.storeKey) - key := sdk.Uint64ToBigEndian(e) - return store.Get(key) + kBytes := k.cdc.MustMarshal(&sk) + sBytes := k.cdc.MustMarshal(&sd) + store.Set(kBytes, sBytes) } diff --git a/x/btccheckpoint/keeper/msg_server.go b/x/btccheckpoint/keeper/msg_server.go index 5e6b83bdf..2c6364705 100644 --- a/x/btccheckpoint/keeper/msg_server.go +++ b/x/btccheckpoint/keeper/msg_server.go @@ -3,10 +3,11 @@ package keeper import ( "context" - "github.com/babylonchain/babylon/x/btccheckpoint/btcutils" + btypes "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/btccheckpoint/types" btcchaincfg "github.com/btcsuite/btcd/chaincfg" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) type msgServer struct { @@ 
-19,56 +20,217 @@ func NewMsgServerImpl(keeper Keeper) types.MsgServer { return &msgServer{keeper} } -func (m msgServer) getProofHeight(proofs []*btcutils.ParsedProof) (uint64, error) { +// Gets proof height in context of btclightclilent, also if proof is composed +// from two different blocks checks that they are on the same fork. +func (m msgServer) getProofHeight(rawSub *types.RawCheckpointSubmission) (uint64, error) { var latestblock = uint64(0) - for _, proof := range proofs { - // TODO consider interfaces here. As currently this implicity assume that all headers - // are on the same fork. The only possiblity to detect that this is not the case - // is to pass []*wire.BlockHeader to light client, and for it to make that - // determination - num, err := m.k.GetBlockHeight(proof.BlockHeader) + fh := rawSub.GetFirstBlockHash() + sh := rawSub.GetSecondBlockHash() + + if fh.Eq(&sh) { + // both hashes are the same which means, two transactions with their respective + // proofs were provided in the same block. We only need to check one block for + // for height + num, err := m.k.GetBlockHeight(fh) if err != nil { - return latestblock, err + return 0, err } - // returning hightes block number as checkpoint number as if highest becomes - // stable then it means older is also stable. + return num, nil + } + + // at this point we know that both transactions were in different blocks. + // we need to check two things: + // - if both blocks are known to header oracle + // - if both blocks are on the same fork i.e if second block is descendant of the + // first block + for _, hash := range []btypes.BTCHeaderHashBytes{fh, sh} { + num, err := m.k.GetBlockHeight(hash) + if err != nil { + return 0, err + } + // the highest block number with a checkpoint being stable implies that all blocks are stable if num > latestblock { latestblock = num } } + // we have checked earlier that both blocks are known to header light client, + // so no need to check err. 
+ isAncestor, err := m.k.IsAncestor(fh, sh) + + if err != nil { + panic("Headers which are should have been known to btclight client") + } + + if !isAncestor { + return 0, types.ErrProvidedHeaderFromDifferentForks + } + return latestblock, nil } -// returns fully assembled rawcheckpoint data and the latest header number of -// headers provided in the proof -func (m msgServer) getRawCheckPoint(proofs []*btcutils.ParsedProof) []byte { - var rawCheckpointData []byte +// checkHashesFromOneBlock checks if all hashes are from the same block i.e +// if all hashes are equal +func checkHashesFromOneBlock(hs []*btypes.BTCHeaderHashBytes) bool { + if len(hs) < 2 { + return true + } + + for i := 1; i < len(hs); i++ { + if !hs[i-1].Eq(hs[i]) { + return false + } + } + + return true +} + +func (m msgServer) checkHashesAreAncestors(hs []*btypes.BTCHeaderHashBytes) bool { + if len(hs) < 2 { + // cannot have ancestry relations with only 0 or 1 hash + return false + } + + for i := 1; i < len(hs); i++ { + anc, err := m.k.IsAncestor(*hs[i-1], *hs[i]) - for _, proof := range proofs { - rawCheckpointData = append(rawCheckpointData, proof.OpReturnData...) + if err != nil { + // TODO: Light client lost knowledge of one of the chekpoint hashes. + // decide what to do here. For now returning false, as we cannot check ancestry. + return false + } + + if !anc { + // all block hashes are known to light client, but are no longer at the same + // fork. Checkpoint defacto lost its validity due to some reorg happening. 
+ return false + } } - return rawCheckpointData + return true +} + +func (m msgServer) checkHeaderIsDescentantOfPreviousEpoch( + previousEpochSubmissions []*types.SubmissionKey, + rawSub *types.RawCheckpointSubmission) bool { + + for _, sub := range previousEpochSubmissions { + // This should always be true, if we have some submission key composed from + // less than 2 transaction keys in previous epoch, something went really wrong + if len(sub.Key) < 2 { + panic("Submission key composed of less than 2 transactions keys in database") + } + + hs := sub.GetKeyBlockHashes() + + // All this functionality could be implemented in checkHashesAreAncestors + // and appending first hash of new subbmision to old checkpoint hashes, but there + // different error conditions here which require different loging. + if checkHashesFromOneBlock(hs) { + // all the hashes are from the same block, we only need to check if firstHash + // of new submission is ancestor of this one hash + anc, err := m.k.IsAncestor(*hs[0], rawSub.GetFirstBlockHash()) + + if err != nil { + // TODO: light client lost knowledge of blockhash from previous epoch + // (we know that this is not rawSub as we checked that earlier) + // It means either some bug / or fork had happened. For now just move + // forward as we are not able to establish ancestry here + continue + } + + if anc { + // found ancestry stop checking + return true + } + } else { + // hashes are not from the same block i.e this checkpoint was split between + // at least two block, check that it is still valid + if !m.checkHashesAreAncestors(hs) { + // checkpoint blockhashes no longer form a chain. Cannot check ancestry + // with new submission. 
Move to the next checkpoint + continue + } + + lastHashFromSavedCheckpoint := hs[len(hs)-1] + + // do not check err as all those hashes were checked in previous validation steps + anc, err := m.k.IsAncestor(*lastHashFromSavedCheckpoint, rawSub.GetFirstBlockHash()) + + if err != nil { + panic("Unexpected anecestry error, all blocks should have been known at this point") + } + + if anc { + // found ancestry stop checking + return true + } + } + } + + return false +} + +// TODO maybe move it to keeper +func (m msgServer) checkAndSaveEpochData( + sdkCtx sdk.Context, + epochNum uint64, + subKey types.SubmissionKey, + subData types.SubmissionData) { + ed := m.k.GetEpochData(sdkCtx, epochNum) + + // TODO: SaveEpochData and SaveSubmission should be done in one transaction. + // Not sure cosmos-sdk has facialities to do it. + // Otherwise it is possible to end up with node which updated submission list + // but did not save submission itself. + + if ed == nil { + // we do not have any data saved yet + newEd := types.NewEpochData(subKey) + ed = &newEd + } else { + // epoch data already existis for given epoch, append new submission, and save + // submission key and data + ed.AppendKey(subKey) + } + + m.k.SaveEpochData(sdkCtx, epochNum, ed) + m.k.SaveSubmission(sdkCtx, subKey, subData) } // TODO at some point add proper logging of error // TODO emit some events for external consumers. 
Those should be probably emited // at EndBlockerCallback func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBTCSpvProof) (*types.MsgInsertBTCSpvProofResponse, error) { + + address, err := sdk.AccAddressFromBech32(req.Submitter) + + if err != nil { + return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid submitter address: %s", err) + } + // TODO get PowLimit from config - proofs, e := types.ParseTwoProofs(req.Proofs, btcchaincfg.MainNetParams.PowLimit) + rawSubmission, e := types.ParseTwoProofs(address, req.Proofs, btcchaincfg.MainNetParams.PowLimit) if e != nil { return nil, types.ErrInvalidCheckpointProof } + // Get the SDK wrapped context + sdkCtx := sdk.UnwrapSDKContext(ctx) + + submissionKey := rawSubmission.GetSubmissionKey() + + if m.k.SubmissionExists(sdkCtx, submissionKey) { + return nil, types.ErrDuplicatedSubmission + } + // TODO for now we do nothing with processed blockHeight but ultimatly it should // be a part of timestamp - _, err := m.getProofHeight(proofs) + _, err = m.getProofHeight(rawSubmission) if err != nil { return nil, err @@ -77,30 +239,41 @@ func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBT // At this point: // - every proof of inclusion is valid i.e every transaction is proved to be // part of provided block and contains some OP_RETURN data - // - header is proved to be part of the chain we know about thorugh BTCLightClient + // - header is proved to be part of the chain we know about through BTCLightClient + // - this is new checkpoint submission // Inform checkpointing module about it. - rawCheckPointData := m.getRawCheckPoint(proofs) - - epochNum, err := m.k.GetCheckpointEpoch(rawCheckPointData) + epochNum, err := m.k.GetCheckpointEpoch(rawSubmission.GetRawCheckPointBytes()) if err != nil { return nil, err } - // Get the SDK wrapped context - sdkCtx := sdk.UnwrapSDKContext(ctx) + // This seems to be valid babylon checkpoint, check ancestors. 
+ // If this submission is not for initial epoch there must already exsits checkpoints + // for previous epoch which are ancestors of provided submissions + if epochNum > 1 { + // this is valid checkpoint for not initial epoch, we need to check previous epoch + // checkpoints + previousEpochData := m.k.GetEpochData(sdkCtx, epochNum-1) + + // First check if there are any checkpoints for previous epoch at all. + if previousEpochData == nil { + return nil, types.ErrNoCheckpointsForPreviousEpoch + } + + if len(previousEpochData.Key) == 0 { + return nil, types.ErrNoCheckpointsForPreviousEpoch + } + + isDescendant := m.checkHeaderIsDescentantOfPreviousEpoch(previousEpochData.Key, rawSubmission) + + if !isDescendant { + return nil, types.ErrProvidedHeaderDoesNotHaveAncestor + } + } - // TODO consider handling here. - // Checkpointing module deemed this checkpoint as correc so for now lets just - // store it. (in future store some metadata about it) - // Things to consider: - // - Are we really guaranteed that epoch is unique key ? - // - It would probably be better to check for duplicates ourselves, by keeping some - // additional indexes like: sha256(checkpoint) -> epochNum and epochnNum -> checkpointStatus - // then we could check for dupliacates without involvement of checkpointing - // module and parsing checkpoint itself - // - What is good db layout for all requiremens - m.k.StoreCheckpoint(sdkCtx, epochNum, rawCheckPointData) + // Everything is fine, save new checkpoint and update Epoch data + m.checkAndSaveEpochData(sdkCtx, epochNum, submissionKey, rawSubmission.GetSubmissionData()) return &types.MsgInsertBTCSpvProofResponse{}, nil } diff --git a/x/btccheckpoint/types/btccheckpoint.pb.go b/x/btccheckpoint/types/btccheckpoint.pb.go new file mode 100644 index 000000000..1dfc266eb --- /dev/null +++ b/x/btccheckpoint/types/btccheckpoint.pb.go @@ -0,0 +1,979 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: babylon/btccheckpoint/btccheckpoint.proto + +package types + +import ( + fmt "fmt" + github_com_babylonchain_babylon_types "github.com/babylonchain/babylon/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Each provided OP_RETURN transaction can be idendtified by hash of block in +// which transaction was included and transaction index in the block +type TransactionKey struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Hash *github_com_babylonchain_babylon_types.BTCHeaderHashBytes `protobuf:"bytes,2,opt,name=hash,proto3,customtype=github.com/babylonchain/babylon/types.BTCHeaderHashBytes" json:"hash,omitempty"` +} + +func (m *TransactionKey) Reset() { *m = TransactionKey{} } +func (m *TransactionKey) String() string { return proto.CompactTextString(m) } +func (*TransactionKey) ProtoMessage() {} +func (*TransactionKey) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{0} +} +func (m *TransactionKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransactionKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransactionKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransactionKey) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_TransactionKey.Merge(m, src) +} +func (m *TransactionKey) XXX_Size() int { + return m.Size() +} +func (m *TransactionKey) XXX_DiscardUnknown() { + xxx_messageInfo_TransactionKey.DiscardUnknown(m) +} + +var xxx_messageInfo_TransactionKey proto.InternalMessageInfo + +func (m *TransactionKey) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +// Checkpoint can be composed from multiple transactions, so to identify whole +// submission we need list of transaction keys. +// Each submission can generally be identified by this list of (txIdx, blockHash) +// tuples. +// Note: this could possibly be optimized as if transactions were in one block +// there would have the same block hash and different indexes, but each blockshash +// is only 33 (1 bytes for prefix encoding and 32 byte hash), so there should +// be other strong arguments for this optimization +type SubmissionKey struct { + Key []*TransactionKey `protobuf:"bytes,1,rep,name=key,proto3" json:"key,omitempty"` +} + +func (m *SubmissionKey) Reset() { *m = SubmissionKey{} } +func (m *SubmissionKey) String() string { return proto.CompactTextString(m) } +func (*SubmissionKey) ProtoMessage() {} +func (*SubmissionKey) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{1} +} +func (m *SubmissionKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubmissionKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubmissionKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubmissionKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubmissionKey.Merge(m, src) +} +func (m *SubmissionKey) XXX_Size() int { + return m.Size() +} +func (m *SubmissionKey) XXX_DiscardUnknown() { + xxx_messageInfo_SubmissionKey.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SubmissionKey proto.InternalMessageInfo + +func (m *SubmissionKey) GetKey() []*TransactionKey { + if m != nil { + return m.Key + } + return nil +} + +// TODO: Determine if we should keep any block number or depth info. +// On one hand it may be usefull to determine if block is stable or not, on other +// depth/block number info, without context (i.e info about chain) is pretty useless +// and blockshash in enough to retrieve is from lightclient +type SubmissionData struct { + // TODO: this could probably be better typed + // Address of submitter of given checkpoint. Required to payup the reward to + // submitter of given checkpoint + Submitter []byte `protobuf:"bytes,1,opt,name=submitter,proto3" json:"submitter,omitempty"` + // Required to recover address of sender of btc transction to payup the reward. + // TODO: Maybe it is worth recovering senders while processing the InsertProof + // message, and store only those. Another point is that it is not that simple + // to recover sender of btc tx. 
+ Btctransaction [][]byte `protobuf:"bytes,2,rep,name=btctransaction,proto3" json:"btctransaction,omitempty"` +} + +func (m *SubmissionData) Reset() { *m = SubmissionData{} } +func (m *SubmissionData) String() string { return proto.CompactTextString(m) } +func (*SubmissionData) ProtoMessage() {} +func (*SubmissionData) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{2} +} +func (m *SubmissionData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubmissionData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubmissionData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubmissionData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubmissionData.Merge(m, src) +} +func (m *SubmissionData) XXX_Size() int { + return m.Size() +} +func (m *SubmissionData) XXX_DiscardUnknown() { + xxx_messageInfo_SubmissionData.DiscardUnknown(m) +} + +var xxx_messageInfo_SubmissionData proto.InternalMessageInfo + +func (m *SubmissionData) GetSubmitter() []byte { + if m != nil { + return m.Submitter + } + return nil +} + +func (m *SubmissionData) GetBtctransaction() [][]byte { + if m != nil { + return m.Btctransaction + } + return nil +} + +// Data stored in db and indexed by epoch number +// TODO: Add btc blockheight at epooch end, when adding hadnling of epoching callbacks +type EpochData struct { + // List of all received checkpoints during this epoch, sorted by order of + // submission. 
+ Key []*SubmissionKey `protobuf:"bytes,1,rep,name=key,proto3" json:"key,omitempty"` +} + +func (m *EpochData) Reset() { *m = EpochData{} } +func (m *EpochData) String() string { return proto.CompactTextString(m) } +func (*EpochData) ProtoMessage() {} +func (*EpochData) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{3} +} +func (m *EpochData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EpochData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EpochData) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochData.Merge(m, src) +} +func (m *EpochData) XXX_Size() int { + return m.Size() +} +func (m *EpochData) XXX_DiscardUnknown() { + xxx_messageInfo_EpochData.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochData proto.InternalMessageInfo + +func (m *EpochData) GetKey() []*SubmissionKey { + if m != nil { + return m.Key + } + return nil +} + +func init() { + proto.RegisterType((*TransactionKey)(nil), "babylon.btccheckpoint.TransactionKey") + proto.RegisterType((*SubmissionKey)(nil), "babylon.btccheckpoint.SubmissionKey") + proto.RegisterType((*SubmissionData)(nil), "babylon.btccheckpoint.SubmissionData") + proto.RegisterType((*EpochData)(nil), "babylon.btccheckpoint.EpochData") +} + +func init() { + proto.RegisterFile("babylon/btccheckpoint/btccheckpoint.proto", fileDescriptor_da8b9af3dbd18a36) +} + +var fileDescriptor_da8b9af3dbd18a36 = []byte{ + // 327 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x51, 0x4d, 0x4b, 0xc3, 0x40, + 0x14, 0xec, 0xb6, 0x2a, 0x74, 0x6d, 0x7b, 0x08, 0x15, 0x82, 0x48, 0x0c, 0x41, 0x25, 0x5e, 0x12, + 0x50, 0xfc, 0x38, 0x78, 0x4a, 0x15, 0x0a, 0x1e, 0x94, 0x58, 0x3c, 0x78, 0xdb, 0xdd, 0x2e, 
0xdd, + 0x50, 0xbb, 0x1b, 0xb2, 0xaf, 0xd0, 0xfc, 0x0b, 0x7f, 0x96, 0xc7, 0x1e, 0xc5, 0x83, 0x48, 0xfb, + 0x47, 0xa4, 0xdb, 0x4a, 0x4c, 0x51, 0xbc, 0x65, 0x26, 0xb3, 0xf3, 0x66, 0xde, 0xc3, 0xc7, 0x94, + 0xd0, 0xfc, 0x59, 0xc9, 0x90, 0x02, 0x63, 0x82, 0xb3, 0x61, 0xaa, 0x12, 0x09, 0x65, 0x14, 0xa4, + 0x99, 0x02, 0x65, 0xed, 0xac, 0xa4, 0x41, 0xe9, 0xe7, 0x6e, 0x7b, 0xa0, 0x06, 0xca, 0x28, 0xc2, + 0xc5, 0xd7, 0x52, 0xec, 0x4d, 0x70, 0xab, 0x97, 0x11, 0xa9, 0x09, 0x83, 0x44, 0xc9, 0x5b, 0x9e, + 0x5b, 0x6d, 0xbc, 0x99, 0xc8, 0x3e, 0x9f, 0xd8, 0xc8, 0x45, 0x7e, 0x33, 0x5e, 0x02, 0xeb, 0x1e, + 0x6f, 0x08, 0xa2, 0x85, 0x5d, 0x75, 0x91, 0xdf, 0x88, 0xae, 0xde, 0x3f, 0xf6, 0x2f, 0x07, 0x09, + 0x88, 0x31, 0x0d, 0x98, 0x1a, 0x85, 0xab, 0x89, 0x4c, 0x90, 0x44, 0x7e, 0x83, 0x10, 0xf2, 0x94, + 0xeb, 0x20, 0xea, 0x75, 0xba, 0x9c, 0xf4, 0x79, 0xd6, 0x25, 0x5a, 0x44, 0x39, 0x70, 0x1d, 0x1b, + 0x27, 0xaf, 0x8b, 0x9b, 0x0f, 0x63, 0x3a, 0x4a, 0xb4, 0x5e, 0x0d, 0xbe, 0xc0, 0xb5, 0x21, 0xcf, + 0x6d, 0xe4, 0xd6, 0xfc, 0xed, 0x93, 0xc3, 0xe0, 0xd7, 0x16, 0x41, 0x39, 0x6c, 0xbc, 0x78, 0xe1, + 0x3d, 0xe2, 0x56, 0xe1, 0x74, 0x4d, 0x80, 0x58, 0x7b, 0xb8, 0xae, 0x17, 0x0c, 0x00, 0xcf, 0x4c, + 0x8f, 0x46, 0x5c, 0x10, 0xd6, 0x11, 0x6e, 0x51, 0x60, 0x50, 0x38, 0xd9, 0x55, 0xb7, 0xe6, 0x37, + 0xe2, 0x35, 0xd6, 0xeb, 0xe0, 0xfa, 0x4d, 0xaa, 0x98, 0x30, 0x96, 0xe7, 0x3f, 0xd3, 0x1d, 0xfc, + 0x91, 0xae, 0x54, 0xc8, 0x84, 0x8b, 0xee, 0x5e, 0x67, 0x0e, 0x9a, 0xce, 0x1c, 0xf4, 0x39, 0x73, + 0xd0, 0xcb, 0xdc, 0xa9, 0x4c, 0xe7, 0x4e, 0xe5, 0x6d, 0xee, 0x54, 0x9e, 0xce, 0xfe, 0x5b, 0xe0, + 0x64, 0xed, 0xd8, 0x66, 0xa1, 0x74, 0xcb, 0x1c, 0xee, 0xf4, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xc4, + 0x2a, 0x5d, 0xee, 0x12, 0x02, 0x00, 0x00, +} + +func (m *TransactionKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransactionKey) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransactionKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Hash != nil { + { + size := m.Hash.Size() + i -= size + if _, err := m.Hash.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubmissionKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubmissionKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubmissionKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Key) > 0 { + for iNdEx := len(m.Key) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Key[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SubmissionData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubmissionData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Btctransaction) > 0 { + for iNdEx := len(m.Btctransaction) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Btctransaction[iNdEx]) + copy(dAtA[i:], 
m.Btctransaction[iNdEx]) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Btctransaction[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Submitter) > 0 { + i -= len(m.Submitter) + copy(dAtA[i:], m.Submitter) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Submitter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EpochData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Key) > 0 { + for iNdEx := len(m.Key) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Key[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintBtccheckpoint(dAtA []byte, offset int, v uint64) int { + offset -= sovBtccheckpoint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TransactionKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovBtccheckpoint(uint64(m.Index)) + } + if m.Hash != nil { + l = m.Hash.Size() + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + return n +} + +func (m *SubmissionKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Key) > 0 { + for _, e := range m.Key { + l = e.Size() + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + } + return n +} + +func (m *SubmissionData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Submitter) + if l > 0 { + n += 1 + l + 
sovBtccheckpoint(uint64(l)) + } + if len(m.Btctransaction) > 0 { + for _, b := range m.Btctransaction { + l = len(b) + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + } + return n +} + +func (m *EpochData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Key) > 0 { + for _, e := range m.Key { + l = e.Size() + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + } + return n +} + +func sovBtccheckpoint(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBtccheckpoint(x uint64) (n int) { + return sovBtccheckpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TransactionKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransactionKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransactionKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v github_com_babylonchain_babylon_types.BTCHeaderHashBytes + m.Hash = &v + if err := m.Hash.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubmissionKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubmissionKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubmissionKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key, &TransactionKey{}) + if err := m.Key[len(m.Key)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubmissionData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubmissionData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubmissionData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Submitter", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Submitter = append(m.Submitter[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Submitter == nil { + m.Submitter = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Btctransaction", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Btctransaction = append(m.Btctransaction, make([]byte, postIndex-iNdEx)) + copy(m.Btctransaction[len(m.Btctransaction)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key, &SubmissionKey{}) + if err := m.Key[len(m.Key)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBtccheckpoint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBtccheckpoint + } + iNdEx += 
length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBtccheckpoint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBtccheckpoint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBtccheckpoint = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBtccheckpoint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBtccheckpoint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/btccheckpoint/types/errors.go b/x/btccheckpoint/types/errors.go index c335e0b86..ebcaa1222 100644 --- a/x/btccheckpoint/types/errors.go +++ b/x/btccheckpoint/types/errors.go @@ -8,5 +8,9 @@ import ( // x/btccheckpoint module sentinel errors var ( - ErrInvalidCheckpointProof = sdkerrors.Register(ModuleName, 1100, "Invalid checkpoint proof") + ErrInvalidCheckpointProof = sdkerrors.Register(ModuleName, 1100, "Invalid checkpoint proof") + ErrDuplicatedSubmission = sdkerrors.Register(ModuleName, 1101, "Duplicated submission") + ErrNoCheckpointsForPreviousEpoch = sdkerrors.Register(ModuleName, 1102, "No checkpoints for previous epoch") + ErrProvidedHeaderFromDifferentForks = sdkerrors.Register(ModuleName, 1103, "Proof header from different forks") + ErrProvidedHeaderDoesNotHaveAncestor = sdkerrors.Register(ModuleName, 1104, "Proof header does not have ancestor in previous epoch") ) diff --git a/x/btccheckpoint/types/expected_keepers.go b/x/btccheckpoint/types/expected_keepers.go index 078408631..1b4b395b8 100644 --- a/x/btccheckpoint/types/expected_keepers.go +++ b/x/btccheckpoint/types/expected_keepers.go @@ -1,7 +1,7 @@ package types import ( - "github.com/btcsuite/btcd/wire" + btypes "github.com/babylonchain/babylon/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/auth/types" ) @@ -19,9 +19,14 @@ type 
BankKeeper interface { } type BTCLightClientKeeper interface { - // Function should validate if provided header is valid and return header - // height if thats the case. - BlockHeight(header wire.BlockHeader) (uint64, error) + // BlockHeight should validate if header with given hash is valid and if it is + // part of known chain. In case this is true it should return this block height + // in case this is false it should return error + BlockHeight(headerHash btypes.BTCHeaderHashBytes) (uint64, error) + + // IsAncestor should check if childHash header is direct ancestor of parentHash + // if either of these headers is not known to the light client it should return error + IsAncestor(parentHash btypes.BTCHeaderHashBytes, childHash btypes.BTCHeaderHashBytes) (bool, error) } type CheckpointingKeeper interface { diff --git a/x/btccheckpoint/types/mock_keepers.go b/x/btccheckpoint/types/mock_keepers.go index 801dc4d2a..b52bab5e5 100644 --- a/x/btccheckpoint/types/mock_keepers.go +++ b/x/btccheckpoint/types/mock_keepers.go @@ -1,17 +1,21 @@ package types import ( - "github.com/btcsuite/btcd/wire" + btypes "github.com/babylonchain/babylon/types" ) // TODO Mock keepers are currently only used when wiring app to satisfy the compiler type MockBTCLightClientKeeper struct{} type MockCheckpointingKeeper struct{} -func (mb MockBTCLightClientKeeper) BlockHeight(header wire.BlockHeader) (uint64, error) { +func (mb MockBTCLightClientKeeper) BlockHeight(header btypes.BTCHeaderHashBytes) (uint64, error) { return uint64(10), nil } +func (mb MockBTCLightClientKeeper) IsAncestor(parentHash btypes.BTCHeaderHashBytes, childHash btypes.BTCHeaderHashBytes) (bool, error) { + return true, nil +} + func (ck MockCheckpointingKeeper) CheckpointEpoch(rawCheckpoint []byte) (uint64, error) { return uint64(10), nil } diff --git a/x/btccheckpoint/types/msgs.go b/x/btccheckpoint/types/msgs.go index 2ba057128..0fa6091d4 100644 --- a/x/btccheckpoint/types/msgs.go +++ b/x/btccheckpoint/types/msgs.go @@ -29,7 
+29,7 @@ const ( // Returned ParsedProofs are in same order as raw proofs // TODO explore possibility of validating that output in second tx is paid by // input in the first tx -func ParseTwoProofs(proofs []*BTCSpvProof, powLimit *big.Int) ([]*btcutils.ParsedProof, error) { +func ParseTwoProofs(submitter sdk.AccAddress, proofs []*BTCSpvProof, powLimit *big.Int) (*RawCheckpointSubmission, error) { if len(proofs) != expectedProofs { return nil, fmt.Errorf("expected at exactly valid op return transactions") } @@ -53,18 +53,22 @@ parsedProofs = append(parsedProofs, parsedProof) } - return parsedProofs, nil + sub := NewRawCheckpointSubmission(submitter, *parsedProofs[0], *parsedProofs[1]) + + return &sub, nil } func (m *MsgInsertBTCSpvProof) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.Submitter); err != nil { + address, err := sdk.AccAddressFromBech32(m.Submitter) + + if err != nil { return sdkerrors.ErrInvalidAddress.Wrapf("invalid submitter address: %s", err) } // TODO get powLimit from some config // result of parsed proof is not needed, drop it // whole parsing stuff is stateless - _, err := ParseTwoProofs(m.Proofs, btcchaincfg.MainNetParams.PowLimit) + _, err = ParseTwoProofs(address, m.Proofs, btcchaincfg.MainNetParams.PowLimit) if err != nil { return err diff --git a/x/btccheckpoint/types/types.go b/x/btccheckpoint/types/types.go index ab1254f4c..b40baf141 100644 --- a/x/btccheckpoint/types/types.go +++ b/x/btccheckpoint/types/types.go @@ -1 +1,100 @@ package types + +import ( + "github.com/babylonchain/babylon/types" + "github.com/babylonchain/babylon/x/btccheckpoint/btcutils" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Semantically valid checkpoint submission with: +// - valid submitter address +// - at least 2 parsed proofs +// Modelling proofs as separate Proof1 and Proof2, as this is more explicit than +// []*ParsedProof. 
+type RawCheckpointSubmission struct {
+	// Submitter is the address of the account that submitted the checkpoint
+	// proofs (recovered from the Bech32 string in MsgInsertBTCSpvProof)
+	Submitter sdk.AccAddress
+	// Proof1 is the parsed proof for the first OP_RETURN transaction
+	Proof1 btcutils.ParsedProof
+	// Proof2 is the parsed proof for the second OP_RETURN transaction
+	Proof2 btcutils.ParsedProof
+}
+
+// NewRawCheckpointSubmission bundles a submitter address and two parsed proofs
+// into a RawCheckpointSubmission.
+func NewRawCheckpointSubmission(a sdk.AccAddress, p1 btcutils.ParsedProof, p2 btcutils.ParsedProof) RawCheckpointSubmission {
+	r := RawCheckpointSubmission{
+		Submitter: a,
+		Proof1:    p1,
+		Proof2:    p2,
+	}
+
+	return r
+}
+
+// GetProofs returns pointers to both proofs of the submission, in submission
+// order.
+func (s *RawCheckpointSubmission) GetProofs() []*btcutils.ParsedProof {
+	return []*btcutils.ParsedProof{&s.Proof1, &s.Proof2}
+}
+
+// GetRawCheckPointBytes returns the raw checkpoint data, i.e. the OP_RETURN
+// data of the first proof followed by the OP_RETURN data of the second one.
+func (s *RawCheckpointSubmission) GetRawCheckPointBytes() []byte {
+	var rawCheckpointData []byte
+	rawCheckpointData = append(rawCheckpointData, s.Proof1.OpReturnData...)
+	rawCheckpointData = append(rawCheckpointData, s.Proof2.OpReturnData...)
+	return rawCheckpointData
+}
+
+// GetFirstBlockHash returns the hash of the block containing the first proof's
+// transaction.
+func (s *RawCheckpointSubmission) GetFirstBlockHash() types.BTCHeaderHashBytes {
+	return s.Proof1.BlockHash
+}
+
+// GetSecondBlockHash returns the hash of the block containing the second
+// proof's transaction.
+func (s *RawCheckpointSubmission) GetSecondBlockHash() types.BTCHeaderHashBytes {
+	return s.Proof2.BlockHash
+}
+
+// toTransactionKey builds the (transaction index, block hash) key identifying
+// the proof's transaction. The hash is copied into a local first so the
+// returned key does not alias the proof's field.
+func toTransactionKey(p *btcutils.ParsedProof) TransactionKey {
+	hashBytes := p.BlockHash
+	return TransactionKey{
+		Index: p.TransactionIdx,
+		Hash:  &hashBytes,
+	}
+}
+
+// GetSubmissionKey returns the key identifying the whole submission: the
+// transaction keys of both proofs, in submission order.
+func (rsc *RawCheckpointSubmission) GetSubmissionKey() SubmissionKey {
+	var keys []*TransactionKey
+	k1 := toTransactionKey(&rsc.Proof1)
+	keys = append(keys, &k1)
+	k2 := toTransactionKey(&rsc.Proof2)
+	keys = append(keys, &k2)
+	return SubmissionKey{
+		Key: keys,
+	}
+}
+
+// GetSubmissionData returns the data stored for this submission: the raw
+// submitter address bytes and the raw bytes of both btc transactions.
+func (rsc *RawCheckpointSubmission) GetSubmissionData() SubmissionData {
+
+	tBytes := [][]byte{rsc.Proof1.TransactionBytes, rsc.Proof2.TransactionBytes}
+	return SubmissionData{
+		Submitter:      rsc.Submitter.Bytes(),
+		Btctransaction: tBytes,
+	}
+}
+
+// GetKeyBlockHashes returns the block hash of every transaction key in the
+// submission key. The returned pointers alias the keys' Hash fields.
+func (sk *SubmissionKey) GetKeyBlockHashes() []*types.BTCHeaderHashBytes {
+	var hashes []*types.BTCHeaderHashBytes
+
+	for _, k := range sk.Key {
+		h := k.Hash
+		hashes = append(hashes, h)
+	}
+
+	return hashes
+}
+
+// GetEpochIndexKey returns the big-endian encoding of the epoch number,
+// presumably used as the store index for EpochData — TODO confirm at call
+// sites in the keeper.
+func GetEpochIndexKey(e uint64) []byte {
+	return sdk.Uint64ToBigEndian(e)
+}
+
+// NewEpochData creates an EpochData holding the provided submission key as
+// its only entry.
+func NewEpochData(key SubmissionKey) EpochData {
+	return EpochData{Key: []*SubmissionKey{&key}}
+}
+
+// AppendKey adds the provided submission key at the end of the epoch's key
+// list, preserving submission order.
+func (s *EpochData) AppendKey(k SubmissionKey) {
+	s.Key = append(s.Key, &k)
+}