From 137bfcb894bce8a848f2d8f7725c4efa444b08df Mon Sep 17 00:00:00 2001 From: manish Date: Tue, 29 May 2018 09:41:01 -0400 Subject: [PATCH] [FAB-10095] Add pvt data related tests This CR introduces a test folder under kvledger that contains - Util code for making it easy to write tests that operate at the level of ledger APIs - Tests related to pvt data functions utilizing the above mentioned util code Change-Id: Ic16007cf2d6034249b0a5e10c0bcb0b800439dae Signed-off-by: manish --- core/ledger/kvledger/kv_ledger.go | 1 + core/ledger/kvledger/tests/client.go | 125 +++++++++ core/ledger/kvledger/tests/committer.go | 94 +++++++ core/ledger/kvledger/tests/env.go | 151 ++++++++++ core/ledger/kvledger/tests/ledger_test.go | 45 +++ core/ledger/kvledger/tests/pvtdata_test.go | 154 ++++++++++ .../kvledger/tests/sample_data_helper.go | 265 ++++++++++++++++++ core/ledger/kvledger/tests/test_helper.go | 73 +++++ core/ledger/kvledger/tests/util.go | 81 ++++++ core/ledger/kvledger/tests/verifier.go | 196 +++++++++++++ .../ledgermgmt/ledger_mgmt_test_exports.go | 10 + 11 files changed, 1195 insertions(+) create mode 100644 core/ledger/kvledger/tests/client.go create mode 100644 core/ledger/kvledger/tests/committer.go create mode 100644 core/ledger/kvledger/tests/env.go create mode 100644 core/ledger/kvledger/tests/ledger_test.go create mode 100644 core/ledger/kvledger/tests/pvtdata_test.go create mode 100644 core/ledger/kvledger/tests/sample_data_helper.go create mode 100644 core/ledger/kvledger/tests/test_helper.go create mode 100644 core/ledger/kvledger/tests/util.go create mode 100644 core/ledger/kvledger/tests/verifier.go diff --git a/core/ledger/kvledger/kv_ledger.go b/core/ledger/kvledger/kv_ledger.go index 179912ff155..cd217461bcf 100644 --- a/core/ledger/kvledger/kv_ledger.go +++ b/core/ledger/kvledger/kv_ledger.go @@ -140,6 +140,7 @@ func (l *kvLedger) recoverDBs() error { //recommitLostBlocks retrieves blocks in specified range and commit the write set to either //state DB 
or history DB or both func (l *kvLedger) recommitLostBlocks(firstBlockNum uint64, lastBlockNum uint64, recoverables ...recoverable) error { + logger.Debugf("recommitLostBlocks() - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables) var err error var blockAndPvtdata *ledger.BlockAndPvtData for blockNumber := firstBlockNum; blockNumber <= lastBlockNum; blockNumber++ { diff --git a/core/ledger/kvledger/tests/client.go b/core/ledger/kvledger/tests/client.go new file mode 100644 index 00000000000..49385bb5884 --- /dev/null +++ b/core/ledger/kvledger/tests/client.go @@ -0,0 +1,125 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/common/util" + "github.com/hyperledger/fabric/core/common/ccprovider" + "github.com/hyperledger/fabric/core/common/privdata" + "github.com/hyperledger/fabric/core/ledger" + "github.com/stretchr/testify/assert" +) + +// client helps in a transction simulation. The client keeps accumlating the results of each simulated transaction +// in a slice and at a later stage can be used to cut a test block for committing. +// In a test, for each instantiated ledger, a single instance of a client is typically sufficient. 
+type client struct { + lgr ledger.PeerLedger + simulatedTrans []*txAndPvtdata // accumulates the results of transactions simulations + assert *assert.Assertions +} + +func newClient(lgr ledger.PeerLedger, t *testing.T) *client { + return &client{lgr, nil, assert.New(t)} +} + +// simulateDataTx takes a simulation logic and wraps it between +// (A) the pre-simulation tasks (such as obtaining a fresh simulator) and +// (B) the post simulation tasks (such as gathering (public and pvt) simulation results and constructing a transaction) +// Since (A) and (B) both are handled in this function, the test code can be kept simple by just supplying the simulation logic +func (c *client) simulateDataTx(txid string, simulationLogic func(s *simulator)) *txAndPvtdata { + if txid == "" { + txid = util.GenerateUUID() + } + ledgerSimulator, err := c.lgr.NewTxSimulator(txid) + c.assert.NoError(err) + sim := &simulator{ledgerSimulator, txid, c.assert} + simulationLogic(sim) + txAndPvtdata := sim.done() + c.simulatedTrans = append(c.simulatedTrans, txAndPvtdata) + return txAndPvtdata +} + +// simulateDeployTx mimics a transction that deploys a chaincode. 
This in turn calls the function 'simulateDataTx' +// with supplying the simulation logic that mimics the inoke funciton of 'lscc' for the ledger tests +func (c *client) simulateDeployTx(ccName string, collConfs []*collConf) *txAndPvtdata { + ccData := &ccprovider.ChaincodeData{Name: ccName} + ccDataBytes, err := proto.Marshal(ccData) + c.assert.NoError(err) + + psudoLSCCInvokeFunc := func(s *simulator) { + s.setState("lscc", ccName, string(ccDataBytes)) + if collConfs != nil { + protoBytes, err := convertToCollConfigProtoBytes(collConfs) + c.assert.NoError(err) + s.setState("lscc", privdata.BuildCollectionKVSKey(ccName), string(protoBytes)) + } + } + return c.simulateDataTx("", psudoLSCCInvokeFunc) +} + +// simulateUpgradeTx see comments on function 'simulateDeployTx' +func (c *client) simulateUpgradeTx(ccName string, collConfs []*collConf) *txAndPvtdata { + return c.simulateDeployTx(ccName, collConfs) +} + +/////////////////////// simulator wrapper functions /////////////////////// +type simulator struct { + ledger.TxSimulator + txid string + assert *assert.Assertions +} + +func (s *simulator) getState(ns, key string) string { + val, err := s.GetState(ns, key) + s.assert.NoError(err) + return string(val) +} + +func (s *simulator) setState(ns, key string, val string) { + s.assert.NoError( + s.SetState(ns, key, []byte(val)), + ) +} + +func (s *simulator) delState(ns, key string) { + s.assert.NoError( + s.DeleteState(ns, key), + ) +} + +func (s *simulator) getPvtdata(ns, coll, key string) { + _, err := s.GetPrivateData(ns, coll, key) + s.assert.NoError(err) +} + +func (s *simulator) setPvtdata(ns, coll, key string, val string) { + s.assert.NoError( + s.SetPrivateData(ns, coll, key, []byte(val)), + ) +} + +func (s *simulator) delPvtdata(ns, coll, key string) { + s.assert.NoError( + s.DeletePrivateData(ns, coll, key), + ) +} + +func (s *simulator) done() *txAndPvtdata { + s.Done() + simRes, err := s.GetTxSimulationResults() + s.assert.NoError(err) + pubRwsetBytes, err 
:= simRes.GetPubSimulationBytes() + s.assert.NoError(err) + envelope, err := constructTransaction(s.txid, pubRwsetBytes) + s.assert.NoError(err) + txAndPvtdata := &txAndPvtdata{Txid: s.txid, Envelope: envelope, Pvtws: simRes.PvtSimulationResults} + return txAndPvtdata +} diff --git a/core/ledger/kvledger/tests/committer.go b/core/ledger/kvledger/tests/committer.go new file mode 100644 index 00000000000..b8cb77f99be --- /dev/null +++ b/core/ledger/kvledger/tests/committer.go @@ -0,0 +1,94 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/protos/common" + "github.com/stretchr/testify/assert" +) + +// committer helps in cutting a block and commits the block (with pvt data) to the ledger +type committer struct { + lgr ledger.PeerLedger + blkgen *blkGenerator + assert *assert.Assertions +} + +func newCommitter(lgr ledger.PeerLedger, t *testing.T) *committer { + return &committer{lgr, newBlockGenerator(lgr, t), assert.New(t)} +} + +// cutBlockAndCommitWithPvtdata cuts the next block from the given 'txAndPvtdata' and commits the block (with pvt data) to the ledger +// This function return a copy of 'ledger.BlockAndPvtData' that was submitted to the ledger to commit. +// A copy is returned instead of the actual one because, ledger makes some changes to the submitted block before commit +// (such as setting the metadata) and the test code would want to have the exact copy of the block that was submitted to +// the ledger +func (c *committer) cutBlockAndCommitWithPvtdata(trans ...*txAndPvtdata) *ledger.BlockAndPvtData { + blk := c.blkgen.nextBlockAndPvtdata(trans...) 
+ blkCopy := c.copyOfBlockAndPvtdata(blk) + c.assert.NoError( + c.lgr.CommitWithPvtData(blk), + ) + return blkCopy +} + +func (c *committer) cutBlockAndCommitExpectError(trans ...*txAndPvtdata) (*ledger.BlockAndPvtData, error) { + blk := c.blkgen.nextBlockAndPvtdata(trans...) + blkCopy := c.copyOfBlockAndPvtdata(blk) + err := c.lgr.CommitWithPvtData(blk) + c.assert.Error(err) + return blkCopy, err +} + +func (c *committer) copyOfBlockAndPvtdata(blk *ledger.BlockAndPvtData) *ledger.BlockAndPvtData { + blkBytes, err := proto.Marshal(blk.Block) + c.assert.NoError(err) + blkCopy := &common.Block{} + c.assert.NoError(proto.Unmarshal(blkBytes, blkCopy)) + return &ledger.BlockAndPvtData{Block: blkCopy, BlockPvtData: blk.BlockPvtData} +} + +///////////////// block generation code /////////////////////////////////////////// +// blkGenerator helps creating the next block for the ledger +type blkGenerator struct { + lastNum uint64 + lastHash []byte + assert *assert.Assertions +} + +// newBlockGenerator constructs a 'blkGenerator' and initializes the 'blkGenerator' +// from the last block available in the ledger so that the next block can be populated +// with the correct block number and previous block hash +func newBlockGenerator(lgr ledger.PeerLedger, t *testing.T) *blkGenerator { + assert := assert.New(t) + info, err := lgr.GetBlockchainInfo() + assert.NoError(err) + return &blkGenerator{info.Height - 1, info.PreviousBlockHash, assert} +} + +// nextBlockAndPvtdata cuts the next block +func (g *blkGenerator) nextBlockAndPvtdata(trans ...*txAndPvtdata) *ledger.BlockAndPvtData { + block := common.NewBlock(g.lastNum+1, g.lastHash) + blockPvtdata := make(map[uint64]*ledger.TxPvtData) + for i, tran := range trans { + seq := uint64(i) + envelopeBytes, _ := proto.Marshal(tran.Envelope) + block.Data.Data = append(block.Data.Data, envelopeBytes) + if tran.Pvtws != nil { + blockPvtdata[seq] = &ledger.TxPvtData{SeqInBlock: seq, WriteSet: tran.Pvtws} + } + } + block.Header.DataHash = 
block.Data.Hash() + g.lastNum++ + g.lastHash = block.Header.Hash() + setBlockFlagsToValid(block) + return &ledger.BlockAndPvtData{Block: block, BlockPvtData: blockPvtdata} +} diff --git a/core/ledger/kvledger/tests/env.go b/core/ledger/kvledger/tests/env.go new file mode 100644 index 00000000000..62dacd7fead --- /dev/null +++ b/core/ledger/kvledger/tests/env.go @@ -0,0 +1,151 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "os" + "path/filepath" + "testing" + + "github.com/hyperledger/fabric/common/ledger/blkstorage/fsblkstorage" + "github.com/hyperledger/fabric/common/ledger/util" + "github.com/hyperledger/fabric/core/ledger/ledgerconfig" + "github.com/hyperledger/fabric/core/ledger/ledgermgmt" + "github.com/hyperledger/fabric/core/peer" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" +) + +type config map[string]interface{} +type rebuildable uint8 + +const ( + rebuildableStatedb rebuildable = 1 + rebuildableBlockIndex rebuildable = 2 + rebuildableConfigHistory rebuildable = 4 + rebuildableHistoryDB rebuildable = 8 +) + +var ( + defaultConfig = config{ + "peer.fileSystemPath": "/tmp/fabric/ledgertests", + "ledger.state.stateDatabase": "goleveldb", + } +) + +type env struct { + assert *assert.Assertions +} + +func newEnv(conf config, t *testing.T) *env { + setupConfigs(conf) + env := &env{assert.New(t)} + initLedgerMgmt() + return env +} + +func (e *env) cleanup() { + closeLedgerMgmt() + e.assert.NoError(os.RemoveAll(getLedgerRootPath())) +} + +func (e *env) closeAllLedgersAndDrop(flags rebuildable) { + closeLedgerMgmt() + defer initLedgerMgmt() + + if flags&rebuildableBlockIndex == rebuildableBlockIndex { + indexPath := getBlockIndexDBPath() + logger.Infof("Deleting blockstore indexdb path [%s]", indexPath) + e.verifyNonEmptyDirExists(indexPath) + e.assert.NoError(os.RemoveAll(indexPath)) + } + + if flags&rebuildableStatedb == rebuildableStatedb { + statedbPath := 
getLevelstateDBPath() + logger.Infof("Deleting statedb path [%s]", statedbPath) + e.verifyNonEmptyDirExists(statedbPath) + e.assert.NoError(os.RemoveAll(statedbPath)) + } + + if flags&rebuildableConfigHistory == rebuildableConfigHistory { + configHistory := getConfigHistoryDBPath() + logger.Infof("Deleting configHistory db path [%s]", configHistory) + e.verifyNonEmptyDirExists(configHistory) + e.assert.NoError(os.RemoveAll(configHistory)) + } +} + +func (e *env) verifyRebuilablesExist(flags rebuildable) { + if flags&rebuildableStatedb == rebuildableBlockIndex { + e.verifyNonEmptyDirExists(getBlockIndexDBPath()) + } + if flags&rebuildableBlockIndex == rebuildableStatedb { + e.verifyNonEmptyDirExists(getLevelstateDBPath()) + } + if flags&rebuildableConfigHistory == rebuildableConfigHistory { + e.verifyNonEmptyDirExists(getConfigHistoryDBPath()) + } +} + +func (e *env) verifyRebuilableDoesNotExist(flags rebuildable) { + if flags&rebuildableStatedb == rebuildableStatedb { + e.verifyDirDoesNotExist(getLevelstateDBPath()) + } + if flags&rebuildableStatedb == rebuildableBlockIndex { + e.verifyDirDoesNotExist(getBlockIndexDBPath()) + } + if flags&rebuildableConfigHistory == rebuildableConfigHistory { + e.verifyDirDoesNotExist(getConfigHistoryDBPath()) + } +} + +func (e *env) verifyNonEmptyDirExists(path string) { + empty, err := util.DirEmpty(path) + e.assert.NoError(err) + e.assert.False(empty) +} + +func (e *env) verifyDirDoesNotExist(path string) { + exists, _, err := util.FileExists(path) + e.assert.NoError(err) + e.assert.False(exists) +} + +// ########################### ledgermgmt and ledgerconfig related functions wrappers ############################# +// In the current code, ledgermgmt and ledgerconfigs are packaged scope APIs and hence so are the following +// wrapper APIs. As a TODO, both the ledgermgmt and ledgerconfig can be refactored as separate objects and then +// the instances of these two would be wrapped inside the `env` struct above. 
+// ################################################################################################################# +func setupConfigs(conf config) { + for c, v := range conf { + viper.Set(c, v) + } +} + +func initLedgerMgmt() { + ledgermgmt.InitializeExistingTestEnvWithCustomProcessors(peer.ConfigTxProcessors) +} + +func closeLedgerMgmt() { + ledgermgmt.Close() +} + +func getLedgerRootPath() string { + return ledgerconfig.GetRootPath() +} + +func getLevelstateDBPath() string { + return ledgerconfig.GetStateLevelDBPath() +} + +func getBlockIndexDBPath() string { + return filepath.Join(ledgerconfig.GetBlockStorePath(), fsblkstorage.IndexDir) +} + +func getConfigHistoryDBPath() string { + return ledgerconfig.GetConfigHistoryPath() +} diff --git a/core/ledger/kvledger/tests/ledger_test.go b/core/ledger/kvledger/tests/ledger_test.go new file mode 100644 index 00000000000..8786975e8d5 --- /dev/null +++ b/core/ledger/kvledger/tests/ledger_test.go @@ -0,0 +1,45 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "os" + "testing" + + "github.com/hyperledger/fabric/common/flogging" +) + +func TestMain(m *testing.M) { + flogging.SetModuleLevel("lockbasedtxmgr", "debug") + flogging.SetModuleLevel("statevalidator", "debug") + flogging.SetModuleLevel("statebasedval", "debug") + flogging.SetModuleLevel("statecouchdb", "debug") + flogging.SetModuleLevel("valimpl", "debug") + flogging.SetModuleLevel("pvtstatepurgemgmt", "debug") + flogging.SetModuleLevel("confighistory", "debug") + flogging.SetModuleLevel("kvledger", "debug") + + os.Exit(m.Run()) +} + +func TestLedgerAPIs(t *testing.T) { + env := newEnv(defaultConfig, t) + defer env.cleanup() + + // create two ledgers + h1 := newTestHelperCreateLgr("ledger1", t) + h2 := newTestHelperCreateLgr("ledger2", t) + + // populate ledgers with sample data + dataHelper := newSampleDataHelper(t) + dataHelper.populateLedger(h1) + dataHelper.populateLedger(h2) + + // verify contents in both the ledgers + dataHelper.verifyLedgerContent(h1) + dataHelper.verifyLedgerContent(h2) +} diff --git a/core/ledger/kvledger/tests/pvtdata_test.go b/core/ledger/kvledger/tests/pvtdata_test.go new file mode 100644 index 00000000000..3e4da12b8bc --- /dev/null +++ b/core/ledger/kvledger/tests/pvtdata_test.go @@ -0,0 +1,154 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" +) + +func TestMissingCollConfig(t *testing.T) { + env := newEnv(defaultConfig, t) + defer env.cleanup() + h := newTestHelperCreateLgr("ledger1", t) + + collConf := []*collConf{{name: "coll1", btl: 5}} + + // deploy cc1 with no coll config + h.simulateDeployTx("cc1", nil) + h.cutBlockAndCommitWithPvtdata() + + // pvt data operations should give error as no collection config defined + h.simulateDataTx("", func(s *simulator) { + h.assertError(s.GetPrivateData("cc1", "coll1", "key")) + h.assertError(s.SetPrivateData("cc1", "coll1", "key", []byte("value"))) + h.assertError(s.DeletePrivateData("cc1", "coll1", "key")) + }) + + // upgrade cc1 (add collConf) + h.simulateUpgradeTx("cc1", collConf) + h.cutBlockAndCommitWithPvtdata() + + // operations on coll1 should not give error + // operations on coll2 should give error (because, only coll1 is defined in collConf) + h.simulateDataTx("", func(s *simulator) { + h.assertNoError(s.GetPrivateData("cc1", "coll1", "key1")) + h.assertNoError(s.SetPrivateData("cc1", "coll1", "key2", []byte("value"))) + h.assertNoError(s.DeletePrivateData("cc1", "coll1", "key3")) + h.assertError(s.GetPrivateData("cc1", "coll2", "key")) + h.assertError(s.SetPrivateData("cc1", "coll2", "key", []byte("value"))) + h.assertError(s.DeletePrivateData("cc1", "coll2", "key")) + }) +} + +func TestTxWithMissingPvtdata(t *testing.T) { + env := newEnv(defaultConfig, t) + defer env.cleanup() + h := newTestHelperCreateLgr("ledger1", t) + + collConf := []*collConf{{name: "coll1", btl: 5}} + + // deploy cc1 with 'collConf' + h.simulateDeployTx("cc1", collConf) + h.cutBlockAndCommitWithPvtdata() + + // pvtdata simulation + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key1", "value1") + }) + // another pvtdata simulation + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key2", "value2") + }) + h.simulatedTrans[0].Pvtws = nil // 
drop pvt writeset from first simulation + blk2 := h.cutBlockAndCommitWithPvtdata() + + h.verifyPvtState("cc1", "coll1", "key2", "value2") // key2 should have been committed + h.simulateDataTx("", func(s *simulator) { + h.assertError(s.GetPrivateData("cc1", "coll1", "key1")) // key1 would be stale with respect to hashed version + }) + + // another data tx overwritting key1 + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key1", "newvalue1") + }) + blk3 := h.cutBlockAndCommitWithPvtdata() + h.verifyPvtState("cc1", "coll1", "key1", "newvalue1") // key1 should have been committed with new value + h.verifyBlockAndPvtDataSameAs(2, blk2) + h.verifyBlockAndPvtDataSameAs(3, blk3) +} + +func TestTxWithWrongPvtdata(t *testing.T) { + env := newEnv(defaultConfig, t) + defer env.cleanup() + h := newTestHelperCreateLgr("ledger1", t) + + collConf := []*collConf{{name: "coll1", btl: 5}} + + // deploy cc1 with 'collConf' + h.simulateDeployTx("cc1", collConf) + h.cutBlockAndCommitWithPvtdata() + + // pvtdata simulation + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key1", "value1") + }) + // another pvtdata simulation + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key2", "value2") + }) + h.simulatedTrans[0].Pvtws = h.simulatedTrans[1].Pvtws // put wrong pvt writeset in first simulation + // the commit of block is rejected if the hash of collection present in the block does not match with the pvtdata + h.cutBlockAndCommitExpectError() + h.verifyPvtState("cc1", "coll1", "key2", "") +} + +func TestBTL(t *testing.T) { + env := newEnv(defaultConfig, t) + defer env.cleanup() + h := newTestHelperCreateLgr("ledger1", t) + collConf := []*collConf{{name: "coll1", btl: 0}, {name: "coll2", btl: 5}} + + // deploy cc1 with 'collConf' + h.simulateDeployTx("cc1", collConf) + h.cutBlockAndCommitWithPvtdata() + + // commit pvtdata writes in block 2. 
+ h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key1", "value1") // (key1 would never expire) + s.setPvtdata("cc1", "coll2", "key2", "value2") // (key2 would expire at block 8) + }) + blk2 := h.cutBlockAndCommitWithPvtdata() + + // commit 5 more blocks with some random key/vals + for i := 0; i < 5; i++ { + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "someOtherKey", "someOtherVal") + s.setPvtdata("cc1", "coll2", "someOtherKey", "someOtherVal") + }) + h.cutBlockAndCommitWithPvtdata() + } + + // After commit of block 7 + h.verifyPvtState("cc1", "coll1", "key1", "value1") // key1 should still exist in the state + h.verifyPvtState("cc1", "coll2", "key2", "value2") // key2 should still exist in the state + h.verifyBlockAndPvtDataSameAs(2, blk2) // key1 and key2 should still exist in the pvtdata storage + + // commit block 8 with some random key/vals + h.simulateDataTx("", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "someOtherKey", "someOtherVal") + s.setPvtdata("cc1", "coll2", "someOtherKey", "someOtherVal") + }) + h.cutBlockAndCommitWithPvtdata() + + // After commit of block 8 + h.verifyPvtState("cc1", "coll1", "key1", "value1") // key1 should still exist in the state + h.verifyPvtState("cc1", "coll2", "key2", "") // key2 should have been purged from the state + h.verifyBlockAndPvtData(2, nil, func(r *retrievedBlockAndPvtdata) { // retrieve the pvtdata for block 2 from pvtdata storage + r.pvtdataShouldContain(0, "cc1", "coll1", "key1", "value1") // key1 should still exist in the pvtdata storage + r.pvtdataShouldNotContain("cc1", "coll2") // shold have been purged from the pvtdata storage + }) +} diff --git a/core/ledger/kvledger/tests/sample_data_helper.go b/core/ledger/kvledger/tests/sample_data_helper.go new file mode 100644 index 00000000000..ab7fe166c30 --- /dev/null +++ b/core/ledger/kvledger/tests/sample_data_helper.go @@ -0,0 +1,265 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "bytes" + "encoding/gob" + "fmt" + "testing" + + "github.com/hyperledger/fabric/core/ledger" + protopeer "github.com/hyperledger/fabric/protos/peer" + "github.com/stretchr/testify/assert" +) + +type submittedData map[string]*submittedLedgerData + +type submittedLedgerData struct { + Blocks []*ledger.BlockAndPvtData + Txs []*txAndPvtdata +} + +func (s submittedData) initForLedger(lgrid string) { + ld := s[lgrid] + if ld == nil { + ld = &submittedLedgerData{} + s[lgrid] = ld + } +} + +func (s submittedData) recordSubmittedBlks(lgrid string, blk ...*ledger.BlockAndPvtData) { + s.initForLedger(lgrid) + s[lgrid].Blocks = append(s[lgrid].Blocks, blk...) +} + +func (s submittedData) recordSubmittedTxs(lgrid string, tx ...*txAndPvtdata) { + s.initForLedger(lgrid) + s[lgrid].Txs = append(s[lgrid].Txs, tx...) +} + +type sampleDataHelper struct { + submittedData submittedData + assert *assert.Assertions + t *testing.T +} + +func newSampleDataHelper(t *testing.T) *sampleDataHelper { + return &sampleDataHelper{make(submittedData), assert.New(t), t} +} + +func (d *sampleDataHelper) populateLedger(h *testhelper) { + lgrid := h.lgrid + // blk1 deploys 2 chaincodes + txdeploy1 := h.simulateDeployTx("cc1", nil) + txdeploy2 := h.simulateDeployTx("cc2", nil) + blk1 := h.cutBlockAndCommitWithPvtdata() + + // blk2 contains 2 public data txs + txdata1 := h.simulateDataTx("txid1", func(s *simulator) { + s.setState("cc1", "key1", d.sampleVal("value01", lgrid)) + s.setState("cc1", "key2", d.sampleVal("value02", lgrid)) + }) + + txdata2 := h.simulateDataTx("txid2", func(s *simulator) { + s.setState("cc2", "key1", d.sampleVal("value03", lgrid)) + s.setState("cc2", "key2", d.sampleVal("value04", lgrid)) + }) + blk2 := h.cutBlockAndCommitWithPvtdata() + + // blk3 upgrades both chaincodes + txupgrade1 := h.simulateUpgradeTx("cc1", d.sampleCollConf1(lgrid, "cc1")) + txupgrade2 := h.simulateUpgradeTx("cc2", 
d.sampleCollConf1(lgrid, "cc2")) + blk3 := h.cutBlockAndCommitWithPvtdata() + + // blk4 contains 2 data txs with private data + txdata3 := h.simulateDataTx("txid3", func(s *simulator) { + s.setPvtdata("cc1", "coll1", "key3", d.sampleVal("value05", lgrid)) + s.setPvtdata("cc1", "coll1", "key4", d.sampleVal("value06", lgrid)) + }) + txdata4 := h.simulateDataTx("txid4", func(s *simulator) { + s.setPvtdata("cc2", "coll1", "key3", d.sampleVal("value07", lgrid)) + s.setPvtdata("cc2", "coll1", "key4", d.sampleVal("value08", lgrid)) + }) + blk4 := h.cutBlockAndCommitWithPvtdata() + + // blk5 upgrades both chaincodes + txupgrade3 := h.simulateUpgradeTx("cc1", d.sampleCollConf2(lgrid, "cc1")) + txupgrade4 := h.simulateDeployTx("cc2", d.sampleCollConf2(lgrid, "cc2")) + blk5 := h.cutBlockAndCommitWithPvtdata() + + // blk6 contains 2 data txs with private data + txdata5 := h.simulateDataTx("txid5", func(s *simulator) { + s.setPvtdata("cc1", "coll2", "key3", d.sampleVal("value09", lgrid)) + s.setPvtdata("cc1", "coll2", "key4", d.sampleVal("value10", lgrid)) + }) + txdata6 := h.simulateDataTx("txid6", func(s *simulator) { + s.setPvtdata("cc2", "coll2", "key3", d.sampleVal("value11", lgrid)) + s.setPvtdata("cc2", "coll2", "key4", d.sampleVal("value12", lgrid)) + }) + blk6 := h.cutBlockAndCommitWithPvtdata() + + // blk7 contains one data txs + txdata7 := h.simulateDataTx("txid7", func(s *simulator) { + s.setState("cc1", "key1", d.sampleVal("value13", lgrid)) + s.DeleteState("cc1", "key2") + s.setPvtdata("cc1", "coll1", "key3", d.sampleVal("value14", lgrid)) + s.DeletePrivateData("cc1", "coll1", "key4") + }) + h.simulatedTrans = nil + + // blk8 contains one data txs that should be marked as invalid because of mvcc conflict with tx in blk7 + txdata8 := h.simulateDataTx("txid8", func(s *simulator) { + s.getState("cc1", "key1") + s.setState("cc1", "key1", d.sampleVal("value15", lgrid)) + }) + blk7 := h.committer.cutBlockAndCommitWithPvtdata(txdata7) + blk8 := 
h.cutBlockAndCommitWithPvtdata() + + d.submittedData.recordSubmittedBlks(lgrid, + blk1, blk2, blk3, blk4, blk5, blk6, blk7, blk8) + d.submittedData.recordSubmittedTxs(lgrid, + txdeploy1, txdeploy2, txdata1, txdata2, txupgrade1, txupgrade2, + txdata3, txdata4, txupgrade3, txupgrade4, txdata5, txdata6, txdata7, txdata8) +} + +func (d *sampleDataHelper) serilizeSubmittedData() []byte { + gob.Register(submittedData{}) + b := bytes.Buffer{} + encoder := gob.NewEncoder(&b) + d.assert.NoError(encoder.Encode(d.submittedData)) + by := b.Bytes() + d.t.Logf("Serialized submitted data to bytes of len [%d]", len(by)) + return by +} + +func (d *sampleDataHelper) loadSubmittedData(b []byte) { + gob.Register(submittedData{}) + sd := make(submittedData) + buf := bytes.NewBuffer(b) + decoder := gob.NewDecoder(buf) + d.assert.NoError(decoder.Decode(&sd)) + d.t.Logf("Deserialized submitted data from bytes of len [%d], submitted data = %#v", len(b), sd) + d.submittedData = sd +} + +func (d *sampleDataHelper) verifyLedgerContent(h *testhelper) { + d.verifyState(h) + d.verifyConfigHistory(h) + d.verifyBlockAndPvtdata(h) + d.verifyGetTransactionByID(h) + + // the submitted data could not be available if the test ledger is loaded from disk in a fresh run + // (e.g., a backup of a test lesger from a previous fabric version) + if len(d.submittedData) != 0 { + d.t.Log("Verifying using submitted data") + d.verifyBlockAndPvtdataUsingSubmittedData(h) + d.verifyGetTransactionByIDUsingSubmittedData(h) + } else { + d.t.Log("Skipping verifying using submitted data") + } +} + +func (d *sampleDataHelper) verifyState(h *testhelper) { + lgrid := h.lgrid + h.verifyPubState("cc1", "key1", d.sampleVal("value13", lgrid)) + h.verifyPubState("cc1", "key2", "") + h.verifyPvtState("cc1", "coll1", "key3", d.sampleVal("value14", lgrid)) + h.verifyPvtState("cc1", "coll1", "key4", "") + h.verifyPvtState("cc1", "coll2", "key3", d.sampleVal("value09", lgrid)) + h.verifyPvtState("cc1", "coll2", "key4", 
d.sampleVal("value10", lgrid)) + + h.verifyPubState("cc2", "key1", d.sampleVal("value03", lgrid)) + h.verifyPubState("cc2", "key2", d.sampleVal("value04", lgrid)) + h.verifyPvtState("cc2", "coll1", "key3", d.sampleVal("value07", lgrid)) + h.verifyPvtState("cc2", "coll1", "key4", d.sampleVal("value08", lgrid)) + h.verifyPvtState("cc2", "coll2", "key3", d.sampleVal("value11", lgrid)) + h.verifyPvtState("cc2", "coll2", "key4", d.sampleVal("value12", lgrid)) +} + +func (d *sampleDataHelper) verifyConfigHistory(h *testhelper) { + lgrid := h.lgrid + h.verifyMostRecentCollectionConfigBelow(10, "cc1", + &expectedCollConfInfo{5, d.sampleCollConf2(lgrid, "cc1")}) + + h.verifyMostRecentCollectionConfigBelow(5, "cc1", + &expectedCollConfInfo{3, d.sampleCollConf1(lgrid, "cc1")}) + + h.verifyMostRecentCollectionConfigBelow(10, "cc2", + &expectedCollConfInfo{5, d.sampleCollConf2(lgrid, "cc2")}) + + h.verifyMostRecentCollectionConfigBelow(5, "cc2", + &expectedCollConfInfo{3, d.sampleCollConf1(lgrid, "cc2")}) +} + +func (d *sampleDataHelper) verifyBlockAndPvtdata(h *testhelper) { + lgrid := h.lgrid + h.verifyBlockAndPvtData(2, nil, func(r *retrievedBlockAndPvtdata) { + r.hasNumTx(2) + r.hasNoPvtdata() + }) + + h.verifyBlockAndPvtData(4, nil, func(r *retrievedBlockAndPvtdata) { + r.hasNumTx(2) + r.pvtdataShouldContain(0, "cc1", "coll1", "key3", d.sampleVal("value05", lgrid)) + r.pvtdataShouldContain(1, "cc2", "coll1", "key3", d.sampleVal("value07", lgrid)) + }) +} + +func (d *sampleDataHelper) verifyGetTransactionByID(h *testhelper) { + h.verifyTxValidationCode("txid7", protopeer.TxValidationCode_VALID) + h.verifyTxValidationCode("txid8", protopeer.TxValidationCode_MVCC_READ_CONFLICT) +} + +func (d *sampleDataHelper) verifyBlockAndPvtdataUsingSubmittedData(h *testhelper) { + lgrid := h.lgrid + submittedData := d.submittedData[lgrid] + for _, submittedBlk := range submittedData.Blocks { + blkNum := submittedBlk.Block.Header.Number + if blkNum != 8 { + 
h.verifyBlockAndPvtDataSameAs(uint64(blkNum), submittedBlk) + } else { + h.verifyBlockAndPvtData(uint64(8), nil, func(r *retrievedBlockAndPvtdata) { + r.sameBlockHeaderAndData(submittedBlk.Block) + r.containsValidationCode(0, protopeer.TxValidationCode_MVCC_READ_CONFLICT) + }) + } + } +} + +func (d *sampleDataHelper) verifyGetTransactionByIDUsingSubmittedData(h *testhelper) { + lgrid := h.lgrid + for _, submittedTx := range d.submittedData[lgrid].Txs { + expectedValidationCode := protopeer.TxValidationCode_VALID + if submittedTx.Txid == "txid8" { + expectedValidationCode = protopeer.TxValidationCode_MVCC_READ_CONFLICT + } + h.verifyGetTransactionByID(submittedTx.Txid, + &protopeer.ProcessedTransaction{TransactionEnvelope: submittedTx.Envelope, ValidationCode: int32(expectedValidationCode)}) + } +} + +func (d *sampleDataHelper) sampleVal(val, ledgerid string) string { + return fmt.Sprintf("%s:%s", val, ledgerid) +} + +func (d *sampleDataHelper) sampleCollConf1(ledgerid, ccName string) []*collConf { + return []*collConf{ + {name: "coll1"}, + {name: ledgerid}, + {name: ccName}, + } +} + +func (d *sampleDataHelper) sampleCollConf2(ledgerid string, ccName string) []*collConf { + return []*collConf{ + {name: "coll1"}, + {name: "coll2"}, + {name: ledgerid}, + {name: ccName}, + } +} diff --git a/core/ledger/kvledger/tests/test_helper.go b/core/ledger/kvledger/tests/test_helper.go new file mode 100644 index 00000000000..e2dcfdd651a --- /dev/null +++ b/core/ledger/kvledger/tests/test_helper.go @@ -0,0 +1,73 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" + + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/ledgermgmt" + "github.com/stretchr/testify/assert" +) + +// testhelper embeds (1) a client, (2) a committer and (3) a verifier, all three operate on +// a ledger instance and add helping/reusable functionality on top of ledger apis that helps +// in avoiding the repetition in the actual tests code. +// the 'client' adds value to the simulation related apis, the 'committer' helps in cutting the +// next block and committing the block, and finally, the verifier helps in verifying that the +// ledger apis returns correct values based on the blocks submitted +type testhelper struct { + *client + *committer + *verifier + lgr ledger.PeerLedger + lgrid string + assert *assert.Assertions +} + +// newTestHelperCreateLgr creates a new ledger and returns a 'testhelper' for the ledger +func newTestHelperCreateLgr(id string, t *testing.T) *testhelper { + genesisBlk, err := constructTestGenesisBlock(id) + assert.NoError(t, err) + lgr, err := ledgermgmt.CreateLedger(genesisBlk) + assert.NoError(t, err) + client, committer, verifier := newClient(lgr, t), newCommitter(lgr, t), newVerifier(lgr, t) + return &testhelper{client, committer, verifier, lgr, id, assert.New(t)} +} + +// newTestHelperOpenLgr opens an existing ledger and returns a 'testhelper' for the ledger +func newTestHelperOpenLgr(id string, t *testing.T) *testhelper { + lgr, err := ledgermgmt.OpenLedger(id) + assert.NoError(t, err) + client, committer, verifier := newClient(lgr, t), newCommitter(lgr, t), newVerifier(lgr, t) + return &testhelper{client, committer, verifier, lgr, id, assert.New(t)} +} + +// cutBlockAndCommitWithPvtdata gathers all the transactions simulated by the test code (by calling +// the functions available in the 'client') and cuts the next block and commits to the ledger +func (h *testhelper) cutBlockAndCommitWithPvtdata() 
*ledger.BlockAndPvtData { + defer func() { h.simulatedTrans = nil }() + return h.committer.cutBlockAndCommitWithPvtdata(h.simulatedTrans...) +} + +func (h *testhelper) cutBlockAndCommitExpectError() (*ledger.BlockAndPvtData, error) { + defer func() { h.simulatedTrans = nil }() + return h.committer.cutBlockAndCommitExpectError(h.simulatedTrans...) +} + +// assertError is a helper function that can be called as assertError(f()) where 'f' is some other function +// this function assumes that the last return type of function 'f' is of type 'error' +func (h *testhelper) assertError(output ...interface{}) { + lastParam := output[len(output)-1] + assert.NotNil(h.t, lastParam) + h.assert.Error(lastParam.(error)) +} + +// assertNoError see comment on function 'assertError' +func (h *testhelper) assertNoError(output ...interface{}) { + h.assert.Nil(output[len(output)-1]) +} diff --git a/core/ledger/kvledger/tests/util.go b/core/ledger/kvledger/tests/util.go new file mode 100644 index 00000000000..2baca43cf8b --- /dev/null +++ b/core/ledger/kvledger/tests/util.go @@ -0,0 +1,81 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "github.com/golang/protobuf/proto" + configtxtest "github.com/hyperledger/fabric/common/configtx/test" + "github.com/hyperledger/fabric/common/flogging" + lutils "github.com/hyperledger/fabric/core/ledger/util" + "github.com/hyperledger/fabric/protos/common" + "github.com/hyperledger/fabric/protos/ledger/rwset" + protopeer "github.com/hyperledger/fabric/protos/peer" + prototestutils "github.com/hyperledger/fabric/protos/testutils" + "github.com/hyperledger/fabric/protos/utils" +) + +var logger = flogging.MustGetLogger("test2") + +// collConf helps writing tests with less verbose code by specifying coll configuration +// in a simple struct in place of 'common.CollectionConfigPackage'. 
(the test helpers' apis +// use 'collConf' as parameters and return values and transform back and forth to/from proto +// message internally (using func 'convertToCollConfigProtoBytes' and 'convertFromCollConfigProto')) +type collConf struct { + name string + btl uint64 +} + +type txAndPvtdata struct { + Txid string + Envelope *common.Envelope + Pvtws *rwset.TxPvtReadWriteSet +} + +func convertToCollConfigProtoBytes(collConfs []*collConf) ([]byte, error) { + var protoConfArray []*common.CollectionConfig + for _, c := range collConfs { + protoConf := &common.CollectionConfig{Payload: &common.CollectionConfig_StaticCollectionConfig{ + StaticCollectionConfig: &common.StaticCollectionConfig{Name: c.name, BlockToLive: c.btl}}} + protoConfArray = append(protoConfArray, protoConf) + } + return proto.Marshal(&common.CollectionConfigPackage{Config: protoConfArray}) +} + +func convertFromCollConfigProto(collConfPkg *common.CollectionConfigPackage) []*collConf { + var collConfs []*collConf + protoConfArray := collConfPkg.Config + for _, protoConf := range protoConfArray { + name, btl := protoConf.GetStaticCollectionConfig().Name, protoConf.GetStaticCollectionConfig().BlockToLive + collConfs = append(collConfs, &collConf{name, btl}) + } + return collConfs +} + +func constructTransaction(txid string, simulationResults []byte) (*common.Envelope, error) { + channelid := "dummyChannel" + ccid := &protopeer.ChaincodeID{ + Name: "dummyCC", + Version: "dummyVer", + } + txenv, _, err := prototestutils.ConstructUnsignedTxEnv(channelid, ccid, &protopeer.Response{Status: 200}, simulationResults, txid, nil, nil) + return txenv, err +} + +func constructTestGenesisBlock(channelid string) (*common.Block, error) { + blk, err := configtxtest.MakeGenesisBlock(channelid) + if err != nil { + return nil, err + } + setBlockFlagsToValid(blk) + return blk, nil +} + +func setBlockFlagsToValid(block *common.Block) { + utils.InitBlockMetadata(block) + 
block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = + lutils.NewTxValidationFlagsSetValue(len(block.Data.Data), protopeer.TxValidationCode_VALID) +} diff --git a/core/ledger/kvledger/tests/verifier.go b/core/ledger/kvledger/tests/verifier.go new file mode 100644 index 00000000000..fe7f732a2df --- /dev/null +++ b/core/ledger/kvledger/tests/verifier.go @@ -0,0 +1,196 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/core/ledger" + lgrutil "github.com/hyperledger/fabric/core/ledger/util" + "github.com/hyperledger/fabric/protos/common" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" + protopeer "github.com/hyperledger/fabric/protos/peer" + "github.com/stretchr/testify/assert" +) + +// verifier provides functions that help tests with less verbose code for querying the ledger +// and verifying the actual results with the expected results +// For the straight forward functions, tests can call them directly on the ledger +type verifier struct { + lgr ledger.PeerLedger + assert *assert.Assertions + t *testing.T +} + +func newVerifier(lgr ledger.PeerLedger, t *testing.T) *verifier { + return &verifier{lgr, assert.New(t), t} +} + +func (v *verifier) verifyLedgerHeight(expectedHt uint64) { + info, err := v.lgr.GetBlockchainInfo() + v.assert.NoError(err) + v.assert.Equal(expectedHt, info.Height) +} + +func (v *verifier) verifyPubState(ns, key string, expectedVal string) { + qe, err := v.lgr.NewQueryExecutor() + v.assert.NoError(err) + defer qe.Done() + committedVal, err := qe.GetState(ns, key) + v.assert.NoError(err) + v.t.Logf("val=%s", committedVal) + var expectedValBytes []byte + if expectedVal != "" { + expectedValBytes = []byte(expectedVal) + } + v.assert.Equal(expectedValBytes, committedVal) +} + +func (v *verifier) verifyPvtState(ns, coll, key 
string, expectedVal string) { + qe, err := v.lgr.NewQueryExecutor() + v.assert.NoError(err) + defer qe.Done() + committedVal, err := qe.GetPrivateData(ns, coll, key) + v.assert.NoError(err) + v.t.Logf("val=%s", committedVal) + var expectedValBytes []byte + if expectedVal != "" { + expectedValBytes = []byte(expectedVal) + } + v.assert.Equal(expectedValBytes, committedVal) +} + +func (v *verifier) verifyMostRecentCollectionConfigBelow(blockNum uint64, chaincodeName string, expectOut *expectedCollConfInfo) { + configHistory, err := v.lgr.GetConfigHistoryRetriever() + v.assert.NoError(err) + actualCollectionConfigInfo, err := configHistory.MostRecentCollectionConfigBelow(blockNum, chaincodeName) + v.assert.NoError(err) + if expectOut == nil { + v.assert.Nil(actualCollectionConfigInfo) + return + } + v.t.Logf("Retrieved CollectionConfigInfo=%s", spew.Sdump(actualCollectionConfigInfo)) + actualCommittingBlockNum := actualCollectionConfigInfo.CommittingBlockNum + actualCollConf := convertFromCollConfigProto(actualCollectionConfigInfo.CollectionConfig) + v.assert.Equal(expectOut.committingBlockNum, actualCommittingBlockNum) + v.assert.Equal(expectOut.collConfs, actualCollConf) +} + +func (v *verifier) verifyBlockAndPvtData(blockNum uint64, filter ledger.PvtNsCollFilter, verifyLogic func(r *retrievedBlockAndPvtdata)) { + out, err := v.lgr.GetPvtDataAndBlockByNum(blockNum, filter) + v.assert.NoError(err) + v.t.Logf("Retrieved Block = %s, pvtdata = %s", spew.Sdump(out.Block), spew.Sdump(out.BlockPvtData)) + verifyLogic(&retrievedBlockAndPvtdata{out, v.assert}) +} + +func (v *verifier) verifyBlockAndPvtDataSameAs(blockNum uint64, expectedOut *ledger.BlockAndPvtData) { + v.verifyBlockAndPvtData(blockNum, nil, func(r *retrievedBlockAndPvtdata) { + r.sameAs(expectedOut) + }) +} + +func (v *verifier) verifyGetTransactionByID(txid string, expectedOut *protopeer.ProcessedTransaction) { + tran, err := v.lgr.GetTransactionByID(txid) + v.assert.NoError(err) + envelopEqual := 
proto.Equal(expectedOut.TransactionEnvelope, tran.TransactionEnvelope) + v.assert.True(envelopEqual) + v.assert.Equal(expectedOut.ValidationCode, tran.ValidationCode) +} + +func (v *verifier) verifyTxValidationCode(txid string, expectedCode protopeer.TxValidationCode) { + tran, err := v.lgr.GetTransactionByID(txid) + v.assert.NoError(err) + v.assert.Equal(int32(expectedCode), tran.ValidationCode) +} + +//////////// structs used by verifier ////////////////////////////////////////////////////////////// +type expectedCollConfInfo struct { + committingBlockNum uint64 + collConfs []*collConf +} + +type retrievedBlockAndPvtdata struct { + *ledger.BlockAndPvtData + assert *assert.Assertions +} + +func (r *retrievedBlockAndPvtdata) sameAs(expectedBlockAndPvtdata *ledger.BlockAndPvtData) { + r.samePvtdata(expectedBlockAndPvtdata.BlockPvtData) + r.sameBlockHeaderAndData(expectedBlockAndPvtdata.Block) + r.sameMetadata(expectedBlockAndPvtdata.Block) +} + +func (r *retrievedBlockAndPvtdata) hasNumTx(numTx int) { + r.assert.Len(r.Block.Data.Data, numTx) +} + +func (r *retrievedBlockAndPvtdata) hasNoPvtdata() { + r.assert.Len(r.BlockPvtData, 0) +} + +func (r *retrievedBlockAndPvtdata) pvtdataShouldContain(txSeq int, ns, coll, key, value string) { + txPvtData := r.BlockAndPvtData.BlockPvtData[uint64(txSeq)] + for _, nsdata := range txPvtData.WriteSet.NsPvtRwset { + if nsdata.Namespace == ns { + for _, colldata := range nsdata.CollectionPvtRwset { + if colldata.CollectionName == coll { + rwset := &kvrwset.KVRWSet{} + r.assert.NoError(proto.Unmarshal(colldata.Rwset, rwset)) + for _, w := range rwset.Writes { + if w.Key == key { + r.assert.Equal([]byte(value), w.Value) + return + } + } + } + } + } + } + r.assert.FailNow("Requested kv not found") +} + +func (r *retrievedBlockAndPvtdata) pvtdataShouldNotContain(ns, coll string) { + allTxPvtData := r.BlockAndPvtData.BlockPvtData + for _, txPvtData := range allTxPvtData { + r.assert.False(txPvtData.Has(ns, coll)) + } +} + +func (r 
*retrievedBlockAndPvtdata) sameBlockHeaderAndData(expectedBlock *common.Block) { + r.assert.True(proto.Equal(expectedBlock.Data, r.BlockAndPvtData.Block.Data)) + r.assert.True(proto.Equal(expectedBlock.Header, r.BlockAndPvtData.Block.Header)) +} + +func (r *retrievedBlockAndPvtdata) sameMetadata(expectedBlock *common.Block) { + // marshalling/unmarshalling treats a nil byte and empty byte interchangeably (based on which scheme is chosen proto vs gob) + // so explicitly comparing each metadata + retrievedMetadata := r.Block.Metadata.Metadata + expectedMetadata := expectedBlock.Metadata.Metadata + r.assert.Equal(len(expectedMetadata), len(retrievedMetadata)) + for i := 0; i < len(retrievedMetadata); i++ { + if len(expectedMetadata[i])+len(retrievedMetadata[i]) != 0 { + r.assert.Equal(expectedMetadata[i], retrievedMetadata[i]) + } + } +} + +func (r *retrievedBlockAndPvtdata) containsValidationCode(txSeq int, validationCode protopeer.TxValidationCode) { + var txFilter lgrutil.TxValidationFlags + txFilter = r.BlockAndPvtData.Block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] + r.assert.Equal(validationCode, txFilter.Flag(txSeq)) +} + +func (r *retrievedBlockAndPvtdata) samePvtdata(expectedPvtdata map[uint64]*ledger.TxPvtData) { + r.assert.Equal(len(expectedPvtdata), len(r.BlockAndPvtData.BlockPvtData)) + for txNum, pvtData := range expectedPvtdata { + actualPvtData := r.BlockAndPvtData.BlockPvtData[txNum] + r.assert.Equal(pvtData.SeqInBlock, actualPvtData.SeqInBlock) + r.assert.True(proto.Equal(pvtData.WriteSet, actualPvtData.WriteSet)) + } +} diff --git a/core/ledger/ledgermgmt/ledger_mgmt_test_exports.go b/core/ledger/ledgermgmt/ledger_mgmt_test_exports.go index e2c1ca37455..ec5d8d464e5 100644 --- a/core/ledger/ledgermgmt/ledger_mgmt_test_exports.go +++ b/core/ledger/ledgermgmt/ledger_mgmt_test_exports.go @@ -47,6 +47,16 @@ func InitializeTestEnvWithCustomProcessors(customTxProcessors customtx.Processor }) } +// 
InitializeExistingTestEnvWithCustomProcessors initializes ledgermgmt for tests with existing ledgers +// This function does not remove the existing ledgers and is used in upgrade tests +// TODO ledgermgmt should be reworked to move the package scoped functions to a struct +func InitializeExistingTestEnvWithCustomProcessors(customTxProcessors customtx.Processors) { + customtx.InitializeTestEnv(customTxProcessors) + initialize(&Initializer{ + CustomTxProcessors: customTxProcessors, + }) +} + +// CleanupTestEnv closes the ledgermgmt and removes the store directory func CleanupTestEnv() { Close()