diff --git a/test/tools/LTE/README.md b/test/tools/LTE/README.md
index a1c34a79710..c097e5de314 100644
--- a/test/tools/LTE/README.md
+++ b/test/tools/LTE/README.md
@@ -111,13 +111,19 @@ By default, the tests use goleveldb as the state database. Fabric provides the
 option of using CouchDB as a pluggable state database. To run the existing
 tests with CouchDB, use the parameter file `parameters_couchdb_daily_CI.sh`:
 ```
-./runbenchmark.sh -f parameters_couchdb_daily_CI.sh all
+./runbenchmarks.sh -f parameters_couchdb_daily_CI.sh all
 ```
 Note that this parameter file (`parameters_couchdb_daily_CI.sh`) contains the
 following line, which is required to run the tests with CouchDB:
 ```
 export useCouchDB="yes"
 ```
+CouchDB can store values in either JSON or binary format. The following option
+in `parameters_couchdb_daily_CI.sh` is used to switch between JSON and binary
+values:
+```
+UseJSONFormat="true"
+```
 
 ## How to View the Test Results
 
diff --git a/test/tools/LTE/experiments/conf.go b/test/tools/LTE/experiments/conf.go
index 466469cddf1..7a68dfae19f 100644
--- a/test/tools/LTE/experiments/conf.go
+++ b/test/tools/LTE/experiments/conf.go
@@ -15,19 +15,24 @@ import (
 // txConf captures the transaction related configurations
 // numTotalTxs specifies the total transactions that should be executed and committed across chains
 // numParallelTxsPerChain specifies the parallel transactions on each of the chains
-// numKeysInEachTx specifies the number of keys that each of transactions should operate
+// numWritesPerTx specifies the number of keys to write in each transaction
+// numReadsPerTx specifies the number of keys to read in each transaction. Note: this parameter should
+// match numWritesPerTx for normal benchmarks; it can be set to zero for batch update measurements.
 type txConf struct {
     numTotalTxs            int
     numParallelTxsPerChain int
-    numKeysInEachTx        int
+    numWritesPerTx         int
+    numReadsPerTx          int
 }
 
 // dataConf captures the data related configurations
 // numKVs specifies number of total key-values across chains
 // kvSize specifies the size of a key-value (in bytes)
+// useJSON specifies if the value stored is in JSON format
 type dataConf struct {
-    numKVs int
-    kvSize int
+    numKVs  int
+    kvSize  int
+    useJSON bool
 }
 
 // configuration captures all the configurations for an experiment
@@ -44,8 +49,8 @@ func defaultConf() *configuration {
     conf := &configuration{}
     conf.chainMgrConf = &chainmgmt.ChainMgrConf{DataDir: "/tmp/fabric/ledgerPerfTests", NumChains: 1}
     conf.batchConf = &chainmgmt.BatchConf{BatchSize: 10, SignBlock: false}
-    conf.txConf = &txConf{numTotalTxs: 100000, numParallelTxsPerChain: 100, numKeysInEachTx: 4}
-    conf.dataConf = &dataConf{numKVs: 100000, kvSize: 200}
+    conf.txConf = &txConf{numTotalTxs: 100000, numParallelTxsPerChain: 100, numWritesPerTx: 4, numReadsPerTx: 4}
+    conf.dataConf = &dataConf{numKVs: 100000, kvSize: 200, useJSON: false}
     return conf
 }
 
@@ -76,8 +81,11 @@ func confFromTestParams(testParams []string) *configuration {
     numTotalTxs := flags.Int("NumTotalTx",
         conf.txConf.numTotalTxs, "Number of total transactions")
 
-    numKeysInEachTx := flags.Int("NumKeysInEachTx",
-        conf.txConf.numKeysInEachTx, "number of keys operated upon in each Tx")
+    numWritesPerTx := flags.Int("NumWritesPerTx",
+        conf.txConf.numWritesPerTx, "number of keys written in each Tx")
+
+    numReadsPerTx := flags.Int("NumReadsPerTx",
+        conf.txConf.numReadsPerTx, "number of keys to read in each Tx")
 
     // batchConf
     batchSize := flags.Int("BatchSize",
@@ -90,15 +98,19 @@ func confFromTestParams(testParams []string) *configuration {
     kvSize := flags.Int("KVSize",
         conf.dataConf.kvSize, "size of the key-value in bytes")
 
+    useJSON := flags.Bool("UseJSONFormat", conf.dataConf.useJSON, "should CouchDB use JSON for values")
+
     flags.Parse(testParams)
 
     conf.chainMgrConf.DataDir = *dataDir
     conf.chainMgrConf.NumChains = *numChains
     conf.txConf.numParallelTxsPerChain = *numParallelTxsPerChain
     conf.txConf.numTotalTxs = *numTotalTxs
-    conf.txConf.numKeysInEachTx = *numKeysInEachTx
+    conf.txConf.numWritesPerTx = *numWritesPerTx
+    conf.txConf.numReadsPerTx = *numReadsPerTx
     conf.batchConf.BatchSize = *batchSize
     conf.dataConf.numKVs = *numKVs
     conf.dataConf.kvSize = *kvSize
+    conf.dataConf.useJSON = *useJSON
     return conf
 }
diff --git a/test/tools/LTE/experiments/insert_txs_test.go b/test/tools/LTE/experiments/insert_txs_test.go
index 7f401d02493..4a6978df285 100644
--- a/test/tools/LTE/experiments/insert_txs_test.go
+++ b/test/tools/LTE/experiments/insert_txs_test.go
@@ -65,16 +65,22 @@ func runInsertClientsForChain(chain *chainmgmt.Chain) {
 }
 
 func runInsertClient(chain *chainmgmt.Chain, startKey, endKey int, wg *sync.WaitGroup) {
-    numKeysPerTx := conf.txConf.numKeysInEachTx
+    numWritesPerTx := conf.txConf.numWritesPerTx
     kvSize := conf.dataConf.kvSize
+    useJSON := conf.dataConf.useJSON
     currentKey := startKey
 
     for currentKey <= endKey {
         simulator, err := chain.NewTxSimulator(util.GenerateUUID())
         common.PanicOnError(err)
-        for i := 0; i < numKeysPerTx; i++ {
-            common.PanicOnError(simulator.SetState(
-                chaincodeName, constructKey(currentKey), constructValue(currentKey, kvSize)))
+        for i := 0; i < numWritesPerTx; i++ {
+            if useJSON {
+                common.PanicOnError(simulator.SetState(
+                    chaincodeName, constructKey(currentKey), constructJSONValue(currentKey, kvSize)))
+            } else {
+                common.PanicOnError(simulator.SetState(
+                    chaincodeName, constructKey(currentKey), constructValue(currentKey, kvSize)))
+            }
             currentKey++
             if currentKey > endKey {
                 break
diff --git a/test/tools/LTE/experiments/readwrite_txs_test.go b/test/tools/LTE/experiments/readwrite_txs_test.go
index 6450447e30c..39b1a6fe2bb 100644
--- a/test/tools/LTE/experiments/readwrite_txs_test.go
+++ b/test/tools/LTE/experiments/readwrite_txs_test.go
@@ -66,19 +66,38 @@ func runReadWriteClientsForChain(chain *chainmgmt.Chain) {
 }
 
 func runReadWriteClient(chain *chainmgmt.Chain, rand *rand.Rand, numTx int, wg *sync.WaitGroup) {
-    numKeysPerTx := conf.txConf.numKeysInEachTx
+    numWritesPerTx := conf.txConf.numWritesPerTx
+    numReadsPerTx := conf.txConf.numReadsPerTx
     maxKeyNumber := calculateShare(conf.dataConf.numKVs, conf.chainMgrConf.NumChains, int(chain.ID))
+    kvSize := conf.dataConf.kvSize
+    useJSON := conf.dataConf.useJSON
+    var value []byte
 
     for i := 0; i < numTx; i++ {
         simulator, err := chain.NewTxSimulator(util.GenerateUUID())
         common.PanicOnError(err)
-        for i := 0; i < numKeysPerTx; i++ {
+        for i := 0; i < numWritesPerTx; i++ {
             keyNumber := rand.Intn(maxKeyNumber)
             key := constructKey(keyNumber)
-            value, err := simulator.GetState(chaincodeName, key)
-            common.PanicOnError(err)
-            if !verifyValue(keyNumber, value) {
-                panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
+            // check to see if the number of reads is exceeded
+            if i < numReadsPerTx-1 {
+                value, err = simulator.GetState(chaincodeName, key)
+                common.PanicOnError(err)
+                if useJSON {
+                    if !verifyJSONValue(keyNumber, value) {
+                        panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
+                    }
+                } else {
+                    if !verifyValue(keyNumber, value) {
+                        panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
+                    }
+                }
+            } else {
+                if useJSON {
+                    value = []byte(constructJSONValue(keyNumber, kvSize))
+                } else {
+                    value = []byte(constructValue(keyNumber, kvSize))
+                }
             }
             common.PanicOnError(simulator.SetState(chaincodeName, key, value))
         }
diff --git a/test/tools/LTE/experiments/util.go b/test/tools/LTE/experiments/util.go
index f85ca32e9d5..6f6dc5e5653 100644
--- a/test/tools/LTE/experiments/util.go
+++ b/test/tools/LTE/experiments/util.go
@@ -18,14 +18,52 @@ package experiments
 
 import (
     "bytes"
+    "encoding/json"
     "fmt"
     "math/rand"
+    "strconv"
 
     logging "github.com/op/go-logging"
 )
 
 var logger = logging.MustGetLogger("experiments")
 
+type marbleRecord struct {
+    ID          string `json:"_id,omitempty"`
+    Rev         string `json:"_rev,omitempty"`
+    Prefix      string `json:"prefix,omitempty"`
+    AssetType   string `json:"asset_type,omitempty"`
+    AssetName   string `json:"asset_name,omitempty"`
+    Color       string `json:"color,omitempty"`
+    Size        int    `json:"size,omitempty"`
+    Owner       string `json:"owner,omitempty"`
+    DataPadding string `json:"datapadding,omitempty"`
+}
+
+var colors = []string{
+    "red",
+    "green",
+    "purple",
+    "yellow",
+    "white",
+    "black",
+}
+
+var owners = []string{
+    "fred",
+    "jerry",
+    "tom",
+    "alice",
+    "kim",
+    "angela",
+    "john",
+}
+
+//TestValue is a struct for holding the test value
+type TestValue struct {
+    Value string
+}
+
 func constructKey(keyNumber int) string {
     return fmt.Sprintf("%s%09d", "key_", keyNumber)
 }
@@ -33,9 +71,37 @@ func constructKey(keyNumber int) string {
 func constructValue(keyNumber int, kvSize int) []byte {
     prefix := constructValuePrefix(keyNumber)
     randomBytes := constructRandomBytes(kvSize - len(prefix))
+
     return append(prefix, randomBytes...)
 }
 
+func constructJSONValue(keyNumber int, kvSize int) []byte {
+
+    prefix := constructValuePrefix(keyNumber)
+
+    rand.Seed(int64(keyNumber))
+    color := colors[rand.Intn(len(colors))]
+    size := rand.Intn(len(colors))*10 + 10
+    owner := owners[rand.Intn(len(owners))]
+    assetName := "marble" + strconv.Itoa(keyNumber)
+
+    testRecord := marbleRecord{Prefix: string(prefix), AssetType: "marble", AssetName: assetName, Color: color, Size: size, Owner: owner}
+
+    jsonValue, _ := json.Marshal(testRecord)
+
+    if kvSize > len(jsonValue) {
+        randomJSONBytes := constructRandomBytes(kvSize - len(jsonValue))
+
+        //add in extra bytes
+        testRecord.DataPadding = string(randomJSONBytes)
+
+        jsonValue, _ = json.Marshal(testRecord)
+    }
+
+    return jsonValue
+
+}
+
 func constructValuePrefix(keyNumber int) []byte {
     return []byte(fmt.Sprintf("%s%09d", "value_", keyNumber))
 }
@@ -45,7 +111,27 @@ func verifyValue(keyNumber int, value []byte) bool {
     if len(value) < len(prefix) {
         return false
     }
+
     return bytes.Equal(value[:len(prefix)], prefix)
+
+}
+
+func verifyJSONValue(keyNumber int, value []byte) bool {
+    prefix := constructValuePrefix(keyNumber)
+    if len(value) < len(prefix) {
+        return false
+    }
+
+    var marble marbleRecord
+
+    json.Unmarshal(value, &marble)
+
+    if len(value) < len(prefix) {
+        return false
+    }
+
+    valuePrefix := []byte(marble.Prefix)
+    return bytes.Equal(valuePrefix, prefix)
 }
 
 func disableLogging() {
diff --git a/test/tools/LTE/scripts/benchmarks.sh b/test/tools/LTE/scripts/benchmarks.sh
index 65bb44e7d64..51eb1c688ba 100755
--- a/test/tools/LTE/scripts/benchmarks.sh
+++ b/test/tools/LTE/scripts/benchmarks.sh
@@ -22,7 +22,7 @@ source ./common.sh
 PKG_NAME="github.com/hyperledger/fabric/test/tools/LTE/experiments"
 
 function setCommonTestParams {
-  TEST_PARAMS="-DataDir=$DataDir, -NumChains=$NumChains, -NumParallelTxPerChain=$NumParallelTxPerChain, -NumKeysInEachTx=$NumKeysInEachTx, -BatchSize=$BatchSize, -NumKVs=$NumKVs, -KVSize=$KVSize"
+  TEST_PARAMS="-DataDir=$DataDir, -NumChains=$NumChains, -NumParallelTxPerChain=$NumParallelTxPerChain, -NumWritesPerTx=$NumWritesPerTx, -NumReadsPerTx=$NumReadsPerTx, -BatchSize=$BatchSize, -NumKVs=$NumKVs, -KVSize=$KVSize, -UseJSONFormat=$UseJSONFormat"
   RESULTANT_DIRS="$DataDir/ledgersData/chains/chains $DataDir/ledgersData/chains/index $DataDir/ledgersData/stateLeveldb $DataDir/ledgersData/historyLeveldb"
 }
 
@@ -36,7 +36,7 @@ function runReadWriteTxs {
   FUNCTION_NAME="BenchmarkReadWriteTxs"
   if [ "$CLEAR_OS_CACHE" == "true" ]; then
     clearOSCache
-  fi 
+  fi
   setCommonTestParams
   TEST_PARAMS="$TEST_PARAMS, -NumTotalTx=$NumTotalTx"
   executeTest
diff --git a/test/tools/LTE/scripts/parameters_couchdb_daily_CI.sh b/test/tools/LTE/scripts/parameters_couchdb_daily_CI.sh
index 6c329aabe25..5436c9571a4 100644
--- a/test/tools/LTE/scripts/parameters_couchdb_daily_CI.sh
+++ b/test/tools/LTE/scripts/parameters_couchdb_daily_CI.sh
@@ -6,12 +6,14 @@
 #
 
 export useCouchDB="yes"
+UseJSONFormat="true"
 DataDir="/tmp/fabric/test/tools/LTE/data"
 NumChains=10
 NumParallelTxPerChain=10
 NumKVs=10000
 NumTotalTx=10000
-NumKeysInEachTx=4
+NumWritesPerTx=4
+NumReadsPerTx=4
 BatchSize=50
 KVSize=200
 
@@ -25,12 +27,12 @@
 # NumParallelTxPerChain=10
 # NumKVs=10000
 # NumTotalTx=10000
-# NumKeysInEachTx=4
+# NumWritesPerTx=4
 # BatchSize=50
 # KVSize=200
 ArrayNumParallelTxPerChain=(1 5 10 20 50 100)
 ArrayNumChains=(1 5 10 20 50)
-ArrayNumKeysInEachTx=(1 2 5 10 20)
+ArrayNumWritesPerTx=(1 2 5 10 20)
 ArrayKVSize=(100 200 500 1000 2000)
 ArrayBatchSize=(10 20 100 500)
 ArrayNumParallelTxWithSingleChain=(1 5 10 20 50 100)
diff --git a/test/tools/LTE/scripts/parameters_daily_CI.sh b/test/tools/LTE/scripts/parameters_daily_CI.sh
index 9192145fd6a..6b31c509ca2 100644
--- a/test/tools/LTE/scripts/parameters_daily_CI.sh
+++ b/test/tools/LTE/scripts/parameters_daily_CI.sh
@@ -9,7 +9,8 @@ NumChains=10
 NumParallelTxPerChain=10
 NumKVs=100000
 NumTotalTx=100000
-NumKeysInEachTx=4
+NumWritesPerTx=4
+NumReadsPerTx=4
 BatchSize=50
 KVSize=200
 
@@ -28,7 +29,7 @@
 # KVSize=200
 ArrayNumParallelTxPerChain=(1 5 10 20 50 100 500 2000)
 ArrayNumChains=(1 5 10 20 50 100 500 2000)
-ArrayNumKeysInEachTx=(1 2 5 10 20)
+ArrayNumWritesPerTx=(1 2 5 10 20)
 ArrayKVSize=(100 200 500 1000 2000)
 ArrayBatchSize=(10 20 100 500)
 ArrayNumParallelTxWithSingleChain=(1 5 10 20 50 100 500 2000)
diff --git a/test/tools/LTE/scripts/runbenchmarks.sh b/test/tools/LTE/scripts/runbenchmarks.sh
index 18dbe4dd8f0..38083d6d415 100755
--- a/test/tools/LTE/scripts/runbenchmarks.sh
+++ b/test/tools/LTE/scripts/runbenchmarks.sh
@@ -27,10 +27,10 @@ function varyNumChains {
   done
 }
 
-function varyNumKeysInEachTx {
-  for v in "${ArrayNumKeysInEachTx[@]}"
+function varyNumWritesPerTx {
+  for v in "${ArrayNumWritesPerTx[@]}"
   do
-    NumKeysInEachTx=$v
+    NumWritesPerTx=$v
     rm -rf $DataDir;runInsertTxs;runReadWriteTxs
   done
 }
@@ -113,7 +113,7 @@ varyNumParallelTxPerChain
 varyNumChains
 varyNumParallelTxWithSingleChain
 varyNumChainsWithNoParallelism
-varyNumKeysInEachTx
+varyNumWritesPerTx
 varyKVSize
 varyBatchSize
 varyNumTxs
@@ -158,8 +158,8 @@ case $1 in
     varyNumParallelTxWithSingleChain ;;
   varyNumChainsWithNoParallelism)
     varyNumChainsWithNoParallelism ;;
-  varyNumKeysInEachTx)
-    varyNumKeysInEachTx ;;
+  varyNumWritesPerTx)
+    varyNumWritesPerTx ;;
   varyKVSize)
     varyKVSize ;;
   varyBatchSize)
@@ -176,7 +176,7 @@ case $1 in
     varyNumChains
     varyNumParallelTxWithSingleChain
     varyNumChainsWithNoParallelism
-    varyNumKeysInEachTx
+    varyNumWritesPerTx
     varyKVSize
     varyBatchSize
     varyNumTxs
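
The sketch below is not part of the patch. It is a standalone illustration, reusing the field names and JSON tags of the marbleRecord struct added to experiments/util.go, of roughly what gets stored per key when UseJSONFormat="true" (a CouchDB-queryable marble document padded toward KVSize) versus the default binary format (a "value_<key>" prefix followed by random bytes). The helper names valuePrefix and jsonValue are invented for the example, the padding uses a printable filler instead of constructRandomBytes, and a local rand source replaces the global seeding, purely to keep it self-contained; the exact attribute values therefore differ from what the benchmark writes.

package main

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

// marbleRecord mirrors the struct added to experiments/util.go in the patch.
type marbleRecord struct {
	ID          string `json:"_id,omitempty"`
	Rev         string `json:"_rev,omitempty"`
	Prefix      string `json:"prefix,omitempty"`
	AssetType   string `json:"asset_type,omitempty"`
	AssetName   string `json:"asset_name,omitempty"`
	Color       string `json:"color,omitempty"`
	Size        int    `json:"size,omitempty"`
	Owner       string `json:"owner,omitempty"`
	DataPadding string `json:"datapadding,omitempty"`
}

var colors = []string{"red", "green", "purple", "yellow", "white", "black"}
var owners = []string{"fred", "jerry", "tom", "alice", "kim", "angela", "john"}

// valuePrefix matches constructValuePrefix: "value_" followed by a 9-digit key number.
func valuePrefix(keyNumber int) []byte {
	return []byte(fmt.Sprintf("%s%09d", "value_", keyNumber))
}

// jsonValue follows the same steps as constructJSONValue: derive marble
// attributes deterministically from the key number, marshal the record, then
// pad the document so the stored value is roughly kvSize bytes.
func jsonValue(keyNumber, kvSize int) []byte {
	r := rand.New(rand.NewSource(int64(keyNumber))) // the patch seeds the global RNG instead
	record := marbleRecord{
		Prefix:    string(valuePrefix(keyNumber)),
		AssetType: "marble",
		AssetName: "marble" + strconv.Itoa(keyNumber),
		Color:     colors[r.Intn(len(colors))],
		Size:      r.Intn(len(colors))*10 + 10, // size derived the same way as in the patch
		Owner:     owners[r.Intn(len(owners))],
	}
	encoded, _ := json.Marshal(record)
	if kvSize > len(encoded) {
		// the patch pads with constructRandomBytes; printable filler keeps this runnable
		record.DataPadding = strings.Repeat("x", kvSize-len(encoded))
		encoded, _ = json.Marshal(record)
	}
	return encoded
}

func main() {
	// UseJSONFormat="true": a queryable JSON document is written for each key.
	fmt.Printf("JSON value for key 7 (KVSize=200):\n%s\n\n", jsonValue(7, 200))
	// UseJSONFormat="false" (default): the value is the prefix plus random bytes up to KVSize.
	fmt.Printf("binary value for key 7 starts with: %s\n", valuePrefix(7))
}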
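
A second standalone sketch, also not part of the patch, to make the read/write split in runReadWriteClient easier to follow: within each transaction, keys with index below NumReadsPerTx-1 are read back and verified before being rewritten, and the remaining keys are written with freshly constructed values, which is why setting NumReadsPerTx=0 turns the benchmark into a pure batch-update measurement. The program just prints that per-key decision for the default NumWritesPerTx=4, NumReadsPerTx=4 configuration.

package main

import "fmt"

func main() {
	// Default values from experiments/conf.go after the patch.
	numWritesPerTx, numReadsPerTx := 4, 4

	// Mirrors the branch taken for each key inside runReadWriteClient.
	for i := 0; i < numWritesPerTx; i++ {
		if i < numReadsPerTx-1 {
			fmt.Printf("key %d: GetState + verify, then SetState with the value read back\n", i)
		} else {
			fmt.Printf("key %d: SetState with a newly constructed value (no read)\n", i)
		}
	}
}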