Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extend header with amount of used data shares #262

Merged
merged 5 commits into from
Apr 2, 2021
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion blockchain/msgs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func TestBlockchainMessageVectors(t *testing.T) {
BlockRequest: &bcproto.BlockRequest{Height: math.MaxInt64}}},
"0a0a08ffffffffffffffff7f"},
{"BlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_BlockResponse{
BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, "1ac0020abd020a5b0a02080b1803220b088092b8c398feffffff012a0212003a204c149a7cfadc92b669b0cbfa4951a1b18c2d9f3177a3b8756d39ebb96e9d63316a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85512130a0b48656c6c6f20576f726c6412001a0022001ac8010a3000000000000000010000000000000001b81cb5596c28d044214b9f935e4af7dbe76e417f6182d86fbee68bfff7b2ff3a0a30ffffffffffffffffffffffffffffffffc4096ba8fccf882c309896e9168fa43fe62fccb752cb12d5160cc1d9c2ebffe7123000000000000000010000000000000001b81cb5596c28d044214b9f935e4af7dbe76e417f6182d86fbee68bfff7b2ff3a1230ffffffffffffffffffffffffffffffffc4096ba8fccf882c309896e9168fa43fe62fccb752cb12d5160cc1d9c2ebffe7"},
BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, "1ac2020abf020a5d0a02080b1803220b088092b8c398feffffff012a0212003a204c149a7cfadc92b669b0cbfa4951a1b18c2d9f3177a3b8756d39ebb96e9d633140017220e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85512130a0b48656c6c6f20576f726c6412001a0022001ac8010a3000000000000000010000000000000001b81cb5596c28d044214b9f935e4af7dbe76e417f6182d86fbee68bfff7b2ff3a0a30ffffffffffffffffffffffffffffffffc4096ba8fccf882c309896e9168fa43fe62fccb752cb12d5160cc1d9c2ebffe7123000000000000000010000000000000001b81cb5596c28d044214b9f935e4af7dbe76e417f6182d86fbee68bfff7b2ff3a1230ffffffffffffffffffffffffffffffffc4096ba8fccf882c309896e9168fa43fe62fccb752cb12d5160cc1d9c2ebffe7"},
{"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{
NoBlockResponse: &bcproto.NoBlockResponse{Height: 1}}}, "12020801"},
{"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{
Expand Down
1 change: 1 addition & 0 deletions node/node_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -480,6 +480,7 @@ func TestMaxProposalBlockSize(t *testing.T) {

// this ensures that the header is at max size
block.Header.Time = timestamp
block.Header.DataShares = math.MaxInt64

pb, err := block.ToProto()
require.NoError(t, err)
Expand Down
308 changes: 172 additions & 136 deletions proto/tendermint/types/types.pb.go

Large diffs are not rendered by default.

15 changes: 8 additions & 7 deletions proto/tendermint/types/types.proto
Original file line number Diff line number Diff line change
Expand Up @@ -68,17 +68,18 @@ message Header {
// hashes of block data
bytes last_commit_hash = 6; // commit from validators from the last block
bytes data_hash = 7; // transactions
int64 data_shares = 8;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe add in a comment here with a (permanent) link to the LL spec.
Also, while it makes sense to keep related fields closer to each other, the comment // hashes of block data implies that this is a hash. I'd suggest adding a newline in between data_hash and data_shares.

Also, the naming is not clear. IMO, we should aim for parity with the spec where feasible. here availableDataOriginalSharesUsed is used in the spec. Either use that name or something that is clearly related. IMO, num_original_data_shares would also be clear enough (cc @adlerjohn).

Copy link
Member

@adlerjohn adlerjohn Apr 1, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree naming parity with the specs should be preferred. Order parity is required, as it affects the canonical serialization.

Copy link
Member Author

@Wondertan Wondertan Apr 1, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@liamsi, Should I then revisit all the protos to check their names and order? Not just for Header

Copy link
Member

@liamsi liamsi Apr 1, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should eventually do this but not necessarily in this PR. Complete parity of all the protos is currently not possible yet as the data structures still differ too much from the ones in the spec anyways. Here it is sufficient to have parity with the name and maybe the position in relation to the other ones in the spec.


// hashes from the app output from the prev block
bytes validators_hash = 8; // validators for the current block
bytes next_validators_hash = 9; // validators for the next block
bytes consensus_hash = 10; // consensus params for current block
bytes app_hash = 11; // state after txs from the previous block
bytes last_results_hash = 12; // root hash of all results from the txs from the previous block
bytes validators_hash = 9; // validators for the current block
bytes next_validators_hash = 10; // validators for the next block
bytes consensus_hash = 11; // consensus params for current block
bytes app_hash = 12; // state after txs from the previous block
bytes last_results_hash = 13; // root hash of all results from the txs from the previous block

// consensus info
bytes evidence_hash = 13; // evidence included in the block
bytes proposer_address = 14; // original proposer of the block
bytes evidence_hash = 14; // evidence included in the block
bytes proposer_address = 15; // original proposer of the block
}

// Data contains the set of transactions included in the block
Expand Down
2 changes: 1 addition & 1 deletion state/tx_filter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func TestTxFilter(t *testing.T) {
tx types.Tx
isErr bool
}{
{types.Tx(tmrand.Bytes(2149)), false},
{types.Tx(tmrand.Bytes(2139)), false},
{types.Tx(tmrand.Bytes(2150)), true},
{types.Tx(tmrand.Bytes(3000)), true},
}
Expand Down
19 changes: 13 additions & 6 deletions types/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ const (
// MaxHeaderBytes is a maximum header size.
// NOTE: Because app hash can be of arbitrary size, the header is therefore not
// capped in size and thus this number should be seen as a soft max
MaxHeaderBytes int64 = 626
MaxHeaderBytes int64 = 636

// MaxOverheadForBlock - maximum overhead to encode a block (up to
// MaxBlockSizeBytes in size) not including its parts except Data.
Expand Down Expand Up @@ -228,7 +228,7 @@ func (b *Block) fillHeader() {
// fillDataAvailabilityHeader fills in any remaining DataAvailabilityHeader fields
// that are a function of the block data.
func (b *Block) fillDataAvailabilityHeader() {
namespacedShares := b.Data.ComputeShares()
namespacedShares, dataSharesLen := b.Data.ComputeShares()
shares := namespacedShares.RawShares()
if len(shares) == 0 {
// no shares -> no row/column roots -> hash(empty)
Expand Down Expand Up @@ -268,6 +268,7 @@ func (b *Block) fillDataAvailabilityHeader() {

// return the root hash of DA Header
b.DataHash = b.DataAvailabilityHeader.Hash()
b.DataShares = int64(dataSharesLen)
}

// nmtcommitment generates the nmt root of some namespaced data
Expand Down Expand Up @@ -300,7 +301,7 @@ func (b *Block) PutBlock(ctx context.Context, nodeAdder format.NodeAdder) error
}

// recompute the shares
namespacedShares := b.Data.ComputeShares()
namespacedShares, _ := b.Data.ComputeShares()
shares := namespacedShares.RawShares()

// don't do anything if there is no data to put on IPFS
Expand Down Expand Up @@ -645,6 +646,8 @@ type Header struct {
// DataHash = root((rowRoot_1 || rowRoot_2 || ... ||rowRoot_2k || columnRoot1 || columnRoot2 || ... || columnRoot2k))
// Block.DataAvailabilityHeader for stores (row|column)Root_i // TODO ...
DataHash tmbytes.HexBytes `json:"data_hash"` // transactions
// amount of data shares within a Block #specs:availableDataOriginalSharesUsed
DataShares int64 `json:"data_shares"`

// hashes from the app output from the prev block
ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` // validators for the current block
Expand Down Expand Up @@ -773,6 +776,7 @@ func (h *Header) Hash() tmbytes.HexBytes {
bzbi,
cdcEncode(h.LastCommitHash),
cdcEncode(h.DataHash),
cdcEncode(h.DataShares),
cdcEncode(h.ValidatorsHash),
cdcEncode(h.NextValidatorsHash),
cdcEncode(h.ConsensusHash),
Expand Down Expand Up @@ -838,6 +842,7 @@ func (h *Header) ToProto() *tmproto.Header {
ConsensusHash: h.ConsensusHash,
AppHash: h.AppHash,
DataHash: h.DataHash,
DataShares: h.DataShares,
EvidenceHash: h.EvidenceHash,
LastResultsHash: h.LastResultsHash,
LastCommitHash: h.LastCommitHash,
Expand Down Expand Up @@ -870,6 +875,7 @@ func HeaderFromProto(ph *tmproto.Header) (Header, error) {
h.ConsensusHash = ph.ConsensusHash
h.AppHash = ph.AppHash
h.DataHash = ph.DataHash
h.DataShares = ph.DataShares
h.EvidenceHash = ph.EvidenceHash
h.LastResultsHash = ph.LastResultsHash
h.LastCommitHash = ph.LastCommitHash
Expand Down Expand Up @@ -1364,8 +1370,9 @@ func (msgs Messages) splitIntoShares() NamespacedShares {
return shares
}

// ComputeShares splits block data into shares of an original data square.
func (data *Data) ComputeShares() NamespacedShares {
// ComputeShares splits block data into shares of an original data square and
// returns them along with the number of non-redundant shares.
func (data *Data) ComputeShares() (NamespacedShares, int) {
// TODO(ismail): splitting into shares should depend on the block size and layout
// see: https://github.com/lazyledger/lazyledger-specs/blob/master/specs/block_proposer.md#laying-out-transactions-and-messages

Expand All @@ -1388,7 +1395,7 @@ func (data *Data) ComputeShares() NamespacedShares {
intermRootsShares...),
evidenceShares...),
msgShares...),
tailShares...)
tailShares...), wantLen
}

func getNextSquareNum(length int) int {
Expand Down
20 changes: 11 additions & 9 deletions types/block_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -343,14 +343,15 @@ func TestHeaderHash(t *testing.T) {
LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)),
LastCommitHash: tmhash.Sum([]byte("last_commit_hash")),
DataHash: tmhash.Sum([]byte("data_hash")),
DataShares: 4,
ValidatorsHash: tmhash.Sum([]byte("validators_hash")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
ConsensusHash: tmhash.Sum([]byte("consensus_hash")),
AppHash: tmhash.Sum([]byte("app_hash")),
LastResultsHash: tmhash.Sum([]byte("last_results_hash")),
EvidenceHash: tmhash.Sum([]byte("evidence_hash")),
ProposerAddress: crypto.AddressHash([]byte("proposer_address")),
}, hexBytesFromString("F740121F553B5418C3EFBD343C2DBFE9E007BB67B0D020A0741374BAB65242A4")},
}, hexBytesFromString("3BA96EAE652191EDBEA84E130C32E94AD86A901B856EC7201B776669F72DE39F")},
{"nil header yields nil", nil, nil},
{"nil ValidatorsHash yields nil", &Header{
Version: tmversion.Consensus{Block: 1, App: 2},
Expand Down Expand Up @@ -435,6 +436,7 @@ func TestMaxHeaderBytes(t *testing.T) {
LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt32, make([]byte, tmhash.Size)),
LastCommitHash: tmhash.Sum([]byte("last_commit_hash")),
DataHash: tmhash.Sum([]byte("data_hash")),
DataShares: math.MaxInt64,
ValidatorsHash: tmhash.Sum([]byte("validators_hash")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
ConsensusHash: tmhash.Sum([]byte("consensus_hash")),
Expand Down Expand Up @@ -479,11 +481,11 @@ func TestBlockMaxDataBytes(t *testing.T) {
}{
0: {-10, 1, 0, true, 0},
1: {10, 1, 0, true, 0},
2: {841, 1, 0, true, 0},
3: {842, 1, 0, false, 0},
4: {843, 1, 0, false, 1},
5: {954, 2, 0, false, 1},
6: {1053, 2, 100, false, 0},
2: {851, 1, 0, true, 0},
3: {852, 1, 0, false, 0},
4: {853, 1, 0, false, 1},
5: {964, 2, 0, false, 1},
6: {1063, 2, 100, false, 0},
}

for i, tc := range testCases {
Expand All @@ -510,9 +512,9 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) {
}{
0: {-10, 1, true, 0},
1: {10, 1, true, 0},
2: {841, 1, true, 0},
3: {842, 1, false, 0},
4: {843, 1, false, 1},
2: {851, 1, true, 0},
3: {852, 1, false, 0},
4: {853, 1, false, 1},
}

for i, tc := range testCases {
Expand Down