Skip to content

Commit

Permalink
[FAB-6744]: Verify hash of pvt rwset
Browse files Browse the repository at this point in the history
Currently, at block commit time, the coordinator retrieves the
required private read-write sets from the transient store, but does not
validate whether the hash of each extracted private rwset matches the
hash value published in the transaction's rwset in the block. As a
consequence the peer fails to commit, leading to a panic. This commit
adds a test to cover this use case and provides a fix, which takes care
to filter out private rwsets with unexpected hash values.

Change-Id: I8c603327bcbb2a1ddc2bd819b85bc927df0c036e
Signed-off-by: Artem Barger <bartem@il.ibm.com>
  • Loading branch information
C0rWin committed Oct 25, 2017
1 parent 43d3e63 commit e3e140d
Show file tree
Hide file tree
Showing 2 changed files with 102 additions and 8 deletions.
17 changes: 9 additions & 8 deletions gossip/privdata/coordinator.go
Original file line number Diff line number Diff line change
Expand Up @@ -620,14 +620,6 @@ func (c *coordinator) listMissingPrivateData(block *common.Block, ownedRWsets ma
if err != nil {
return nil, errors.WithStack(err)
}
// In the end, iterate over the ownedRWsets, and if the key doesn't exist in
// the privateRWsetsInBlock - delete it from the ownedRWsets
for k := range ownedRWsets {
if _, exists := privateRWsetsInBlock[k]; !exists {
logger.Warning("Removed", k.namespace, k.collection, "hash", k.hash, "from the data passed to the ledger")
delete(ownedRWsets, k)
}
}

privateInfo := &privateDataInfo{
sources: sources,
Expand All @@ -640,6 +632,15 @@ func (c *coordinator) listMissingPrivateData(block *common.Block, ownedRWsets ma
// Put into ownedRWsets RW sets that are missing and found in the transient store
c.fetchMissingFromTransientStore(privateInfo.missingKeysByTxIDs, ownedRWsets)

// In the end, iterate over the ownedRWsets, and if the key doesn't exist in
// the privateRWsetsInBlock - delete it from the ownedRWsets
for k := range ownedRWsets {
if _, exists := privateRWsetsInBlock[k]; !exists {
logger.Warning("Removed", k.namespace, k.collection, "hash", k.hash, "from the data passed to the ledger")
delete(ownedRWsets, k)
}
}

privateInfo.missingKeys = privateInfo.missingKeysByTxIDs.flatten()
// Remove all keys we already own
privateInfo.missingKeys.exclude(func(key rwSetKey) bool {
Expand Down
93 changes: 93 additions & 0 deletions gossip/privdata/coordinator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -683,6 +683,99 @@ func TestCoordinatorStoreInvalidBlock(t *testing.T) {
assert.Contains(t, err.Error(), "Block data is empty")
}

// TestCoordinatorToFilterOutPvtRWSetsWithWrongHash verifies that when the
// transient store holds a private rwset for ns1:c1 whose hash does not match
// the hash published in the block, the coordinator discards the stale rwset
// and fetches the correct ns1:c1 rwset from remote peers before committing.
func TestCoordinatorToFilterOutPvtRWSetsWithWrongHash(t *testing.T) {
	/*
		Test case where the peer receives a new block for commit.
		It has ns1:c1 in the transient store, but with the wrong
		hash, hence it will fetch ns1:c1 from other peers.
	*/
	// Identity/signature/data bytes are arbitrary; the collection store
	// below accepts everyone, so their values do not matter here.
	peerSelfSignedData := common.SignedData{
		Identity: []byte{0, 1, 2},
		Signature: []byte{3, 4, 5},
		Data: []byte{6, 7, 8},
	}

	// The private data the ledger is expected to receive: exactly the
	// "rws-original" payload fetched from peers — not whatever stale rwset
	// the transient store returned for ns1:c1.
	expectedPvtData := map[uint64]*ledger.TxPvtData{
		0: {SeqInBlock: 0, WriteSet: &rwset.TxPvtReadWriteSet{
			DataModel: rwset.TxReadWriteSet_KV,
			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
				{
					Namespace: "ns1",
					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
						{
							CollectionName: "c1",
							Rwset: []byte("rws-original"),
						},
					},
				},
			},
		}},
	}

	cs := createcollectionStore(peerSelfSignedData).thatAcceptsAll()
	committer := &committerMock{}
	store := &mockTransientStore{t: t}

	fetcher := &fetcherMock{t: t}

	var commitHappened bool

	// Capture the private data actually handed to the ledger on commit and
	// assert it equals expectedPvtData (i.e. the stale rwset was filtered out).
	committer.On("CommitWithPvtData", mock.Anything).Run(func(args mock.Arguments) {
		var privateDataPassed2Ledger privateData = args.Get(0).(*ledger.BlockAndPvtData).BlockPvtData
		assert.True(t, privateDataPassed2Ledger.Equal(expectedPvtData))
		commitHappened = true
	}).Return(nil)

	// The block advertises the hash of "rws-original" for ns1:c1.
	hash := util2.ComputeSHA256([]byte("rws-original"))
	bf := &blockFactory{
		channelID: "test",
	}

	block := bf.AddTxnWithEndorsement("tx1", "ns1", hash, "org1", "c1").create()
	// The transient store returns an rwset for ns1:c1 whose content
	// presumably hashes to a different value than `hash` above, so the
	// coordinator must reject it — TODO confirm against mockRWSetScanner.
	store.On("GetTxPvtRWSetByTxid", "tx1", mock.Anything).Return((&mockRWSetScanner{}).withRWSet("ns1", "c1"), nil)

	coordinator := NewCoordinator(Support{
		CollectionStore: cs,
		Committer: committer,
		Fetcher: fetcher,
		TransientStore: store,
		Validator: &validatorMock{},
	}, peerSelfSignedData)

	// Because the transient-store copy is unusable, the coordinator is
	// expected to fetch ns1:c1 for tx1 from the endorser org1, which returns
	// the correct "rws-original" payload.
	fetcher.On("fetch", mock.Anything).expectingDigests([]*proto.PvtDataDigest{
		{
			TxId: "tx1", Namespace: "ns1", Collection: "c1", BlockSeq: 1,
		},
	}).expectingEndorsers("org1").Return([]*proto.PvtDataElement{
		{
			Digest: &proto.PvtDataDigest{
				BlockSeq: 1,
				Collection: "c1",
				Namespace: "ns1",
				TxId: "tx1",
			},
			Payload: [][]byte{[]byte("rws-original")},
		},
	}, nil)
	// The freshly fetched rwset should be persisted to the transient store.
	store.On("Persist", mock.Anything, uint64(1), mock.Anything).
		expectRWSet("ns1", "c1", []byte("rws-original")).Return(nil)

	// Record which transactions get purged from the transient store.
	purgedTxns := make(map[string]struct{})
	store.On("PurgeByTxids", mock.Anything).Run(func(args mock.Arguments) {
		for _, txn := range args.Get(0).([]string) {
			purgedTxns[txn] = struct{}{}
		}
	}).Return(nil)

	coordinator.StoreBlock(block, nil)
	// Assert the block was eventually committed
	assert.True(t, commitHappened)

	// Assert transaction has been purged
	_, exists := purgedTxns["tx1"]
	assert.True(t, exists)
}

func TestCoordinatorStoreBlock(t *testing.T) {
peerSelfSignedData := common.SignedData{
Identity: []byte{0, 1, 2},
Expand Down

0 comments on commit e3e140d

Please sign in to comment.