diff --git a/bddtests/syschaincode/noop/chaincode.go b/bddtests/syschaincode/noop/chaincode.go index f0c66c49dd0..28b3f26b85c 100644 --- a/bddtests/syschaincode/noop/chaincode.go +++ b/bddtests/syschaincode/noop/chaincode.go @@ -21,7 +21,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/core/chaincode/shim" - ld "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) @@ -40,10 +39,6 @@ type SystemChaincode struct { func (t *SystemChaincode) getLedger() ledgerHandler { if t.mockLedgerH == nil { - lh, err := ld.GetLedger() - if err == nil { - return lh - } panic("Chaincode is unable to get the ledger.") } else { return t.mockLedgerH diff --git a/core/chaincode/chaincode_support.go b/core/chaincode/chaincode_support.go index 202b8c5355f..dc5edecd6ea 100644 --- a/core/chaincode/chaincode_support.go +++ b/core/chaincode/chaincode_support.go @@ -34,7 +34,7 @@ import ( "github.com/hyperledger/fabric/core/container" "github.com/hyperledger/fabric/core/container/ccintf" "github.com/hyperledger/fabric/core/crypto" - ledgernext "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/flogging" pb "github.com/hyperledger/fabric/protos" ) @@ -64,8 +64,8 @@ func init() { } //use this for ledger access and make sure TXSimulator is being used -func getTxSimulator(context context.Context) ledgernext.TxSimulator { - if txsim, ok := context.Value(TXSimulatorKey).(ledgernext.TxSimulator); ok { +func getTxSimulator(context context.Context) ledger.TxSimulator { + if txsim, ok := context.Value(TXSimulatorKey).(ledger.TxSimulator); ok { return txsim } panic("!!!---Not Using ledgernext---!!!") diff --git a/core/chaincode/exectransaction_test.go b/core/chaincode/exectransaction_test.go index 262b1414af7..7c573f459fc 100644 --- a/core/chaincode/exectransaction_test.go +++ b/core/chaincode/exectransaction_test.go @@ -31,8 +31,8 @@ import ( "github.com/hyperledger/fabric/core/container" "github.com/hyperledger/fabric/core/container/ccintf" "github.com/hyperledger/fabric/core/crypto" - ledgernext "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" "github.com/hyperledger/fabric/core/util" "github.com/hyperledger/fabric/membersrvc/ca" pb "github.com/hyperledger/fabric/protos" @@ -154,7 +154,7 @@ func finitPeer(lis net.Listener) { os.RemoveAll(filepath.Join(os.TempDir(), "hyperledger")) } -func startTxSimulation(ctxt context.Context) (context.Context, ledgernext.TxSimulator, error) { +func startTxSimulation(ctxt context.Context) (context.Context, ledger.TxSimulator, error) { ledgername := string(DefaultChain) lgr := kvledger.GetLedger(ledgername) txsim, err := lgr.NewTxSimulator() @@ -166,7 +166,7 @@ func startTxSimulation(ctxt context.Context) (context.Context, ledgernext.TxSimu return ctxt, txsim, nil } -func endTxSimulation(txsim ledgernext.TxSimulator, payload []byte, commit bool) error { +func endTxSimulation(txsim ledger.TxSimulator, payload []byte, commit bool) error { txsim.Done() ledgername := string(DefaultChain) if lgr := kvledger.GetLedger(ledgername); lgr != nil { @@ -361,7 +361,7 @@ func invoke(ctx context.Context, spec *pb.ChaincodeSpec) (ccevt *pb.ChaincodeEve return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err) } - var txsim ledgernext.TxSimulator + var txsim ledger.TxSimulator ctx, txsim, err = 
startTxSimulation(ctx) if err != nil { return nil, uuid, nil, fmt.Errorf("Failed to get handle to simulator: %s ", err) @@ -1120,7 +1120,7 @@ func TestChaincodeQueryChaincodeWithSec(t *testing.T) { // Test the invocation of a transaction. func TestRangeQuery(t *testing.T) { - //TODO enable after ledgernext enables RangeQuery + //TODO enable after ledger enables RangeQuery t.Skip() lis, err := initPeer() diff --git a/core/chaincode/handler.go b/core/chaincode/handler.go index d1e929e5322..82697d45339 100644 --- a/core/chaincode/handler.go +++ b/core/chaincode/handler.go @@ -25,7 +25,7 @@ import ( "github.com/golang/protobuf/proto" ccintf "github.com/hyperledger/fabric/core/container/ccintf" "github.com/hyperledger/fabric/core/crypto" - ledgernext "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/util" pb "github.com/hyperledger/fabric/protos" "github.com/looplab/fsm" @@ -58,9 +58,9 @@ type transactionContext struct { responseNotifier chan *pb.ChaincodeMessage // tracks open iterators used for range queries - rangeQueryIteratorMap map[string]ledgernext.ResultsIterator + rangeQueryIteratorMap map[string]ledger.ResultsIterator - txsimulator ledgernext.TxSimulator + txsimulator ledger.TxSimulator } type nextStateInfo struct { @@ -123,7 +123,7 @@ func (handler *Handler) createTxContext(ctxt context.Context, txid string, tx *p return nil, fmt.Errorf("txid:%s exists", txid) } txctx := &transactionContext{transactionSecContext: tx, responseNotifier: make(chan *pb.ChaincodeMessage, 1), - rangeQueryIteratorMap: make(map[string]ledgernext.ResultsIterator)} + rangeQueryIteratorMap: make(map[string]ledger.ResultsIterator)} handler.txCtxs[txid] = txctx txctx.txsimulator = getTxSimulator(ctxt) @@ -145,13 +145,13 @@ func (handler *Handler) deleteTxContext(txid string) { } func (handler *Handler) putRangeQueryIterator(txContext *transactionContext, txid string, - rangeScanIterator ledgernext.ResultsIterator) { + rangeScanIterator ledger.ResultsIterator) { handler.Lock() defer handler.Unlock() txContext.rangeQueryIteratorMap[txid] = rangeScanIterator } -func (handler *Handler) getRangeQueryIterator(txContext *transactionContext, txid string) ledgernext.ResultsIterator { +func (handler *Handler) getRangeQueryIterator(txContext *transactionContext, txid string) ledger.ResultsIterator { handler.Lock() defer handler.Unlock() return txContext.rangeQueryIteratorMap[txid] @@ -727,7 +727,7 @@ func (handler *Handler) handleRangeQueryState(msg *pb.ChaincodeMessage) { return } //PDMP - let it panic if not KV - kv := qresult.(ledgernext.KV) + kv := qresult.(ledger.KV) // Decrypt the data if the confidential is enabled decryptedValue, decryptErr := handler.decrypt(msg.Txid, kv.Value) if decryptErr != nil { @@ -835,7 +835,7 @@ func (handler *Handler) handleRangeQueryStateNext(msg *pb.ChaincodeMessage) { return } //PDMP - let it panic if not KV - kv := qresult.(ledgernext.KV) + kv := qresult.(ledger.KV) // Decrypt the data if the confidential is enabled decryptedValue, decryptErr := handler.decrypt(msg.Txid, kv.Value) if decryptErr != nil { diff --git a/core/chaincode/importsysccs.go b/core/chaincode/importsysccs.go index 2cd096605b2..b43ca313f15 100644 --- a/core/chaincode/importsysccs.go +++ b/core/chaincode/importsysccs.go @@ -18,20 +18,12 @@ package chaincode import ( //import system chain codes here - "github.com/hyperledger/fabric/bddtests/syschaincode/noop" "github.com/hyperledger/fabric/core/system_chaincode/escc" 
"github.com/hyperledger/fabric/core/system_chaincode/vscc" ) //see systemchaincode_test.go for an example using "sample_syscc" var systemChaincodes = []*SystemChaincode{ - { - Enabled: true, - Name: "noop", - Path: "github.com/hyperledger/fabric/bddtests/syschaincode/noop", - InitArgs: [][]byte{}, - Chaincode: &noop.SystemChaincode{}, - }, { Enabled: true, Name: "lccc", diff --git a/core/chaincode/lccc.go b/core/chaincode/lccc.go index e1c469d58d8..c7da33c7a87 100644 --- a/core/chaincode/lccc.go +++ b/core/chaincode/lccc.go @@ -21,8 +21,8 @@ import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/core/chaincode/shim" - ledger "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" pb "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" "golang.org/x/net/context" diff --git a/core/chaincode/sysccapi.go b/core/chaincode/sysccapi.go index 6cb7e11f0a8..19e6e1f1e7c 100644 --- a/core/chaincode/sysccapi.go +++ b/core/chaincode/sysccapi.go @@ -23,8 +23,8 @@ import ( "github.com/hyperledger/fabric/core/chaincode/shim" "github.com/hyperledger/fabric/core/container/inproccontroller" - ledgernext "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" "github.com/spf13/viper" @@ -80,7 +80,7 @@ func RegisterSysCC(syscc *SystemChaincode) error { chainName := string(DefaultChain) lgr := kvledger.GetLedger(chainName) - var txsim ledgernext.TxSimulator + var txsim ledger.TxSimulator if txsim, err = lgr.NewTxSimulator(); err != nil { return err } diff --git a/core/chaincode/systemchaincode_test.go b/core/chaincode/systemchaincode_test.go index 9de0e3fc266..98df42f14d0 100644 --- a/core/chaincode/systemchaincode_test.go +++ b/core/chaincode/systemchaincode_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" "github.com/hyperledger/fabric/core/system_chaincode/samplesyscc" "github.com/hyperledger/fabric/core/util" pb "github.com/hyperledger/fabric/protos" diff --git a/core/committer/noopssinglechain/committer.go b/core/committer/noopssinglechain/committer.go index 750214bf2fb..9e0dff737d2 100644 --- a/core/committer/noopssinglechain/committer.go +++ b/core/committer/noopssinglechain/committer.go @@ -26,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/core/chaincode" "github.com/hyperledger/fabric/core/committer" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" ab "github.com/hyperledger/fabric/orderer/atomicbroadcast" "golang.org/x/net/context" "google.golang.org/grpc" diff --git a/core/crypto/validator_validity_period.go b/core/crypto/validator_validity_period.go deleted file mode 100644 index b934b9a5ba8..00000000000 --- a/core/crypto/validator_validity_period.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crypto - -import ( - "errors" - "strconv" - "time" - - "github.com/spf13/viper" - - "github.com/hyperledger/fabric/core/crypto/primitives" - "github.com/hyperledger/fabric/core/ledger" - obc "github.com/hyperledger/fabric/protos" -) - -//We are temporarily disabling the validity period functionality -var allowValidityPeriodVerification = false - -func validityPeriodVerificationEnabled() bool { - // If the verification of the validity period is enabled in the configuration file return the configured value - if viper.IsSet("peer.validator.validity-period.verification") { - return viper.GetBool("peer.validator.validity-period.verification") - } - - // Validity period verification is enabled by default if no configuration was specified. - return true -} - -func (validator *validatorImpl) verifyValidityPeriod(tx *obc.Transaction) (*obc.Transaction, error) { - if tx.Cert != nil && tx.Signature != nil { - - // Unmarshal cert - cert, err := primitives.DERToX509Certificate(tx.Cert) - if err != nil { - validator.Errorf("verifyValidityPeriod: failed unmarshalling cert %s:", err) - return tx, err - } - - cid := viper.GetString("pki.validity-period.chaincodeHash") - - ledger, err := ledger.GetLedger() - if err != nil { - validator.Errorf("verifyValidityPeriod: failed getting access to the ledger %s:", err) - return tx, err - } - - vpBytes, err := ledger.GetState(cid, "system.validity.period", true) - if err != nil { - validator.Errorf("verifyValidityPeriod: failed reading validity period from the ledger %s:", err) - return tx, err - } - - i, err := strconv.ParseInt(string(vpBytes[:]), 10, 64) - if err != nil { - validator.Errorf("verifyValidityPeriod: failed to parse validity period %s:", err) - return tx, err - } - - vp := time.Unix(i, 0) - - var errMsg string - - // Verify the validity period of the TCert - switch { - case cert.NotAfter.Before(cert.NotBefore): - errMsg = "verifyValidityPeriod: certificate validity period is invalid" - case vp.Before(cert.NotBefore): - errMsg = "verifyValidityPeriod: certificate validity period is in the future" - case vp.After(cert.NotAfter): - errMsg = "verifyValidityPeriod: certificate validity period is in the past" - } - - if errMsg != "" { - validator.Error(errMsg) - return tx, errors.New(errMsg) - } - } - - return tx, nil -} diff --git a/core/endorser/endorser.go b/core/endorser/endorser.go index 88e39ac92cc..1ca8a778a43 100644 --- a/core/endorser/endorser.go +++ b/core/endorser/endorser.go @@ -24,8 +24,8 @@ import ( "golang.org/x/net/context" "github.com/hyperledger/fabric/core/chaincode" - ledger "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" "github.com/hyperledger/fabric/core/peer" "github.com/hyperledger/fabric/core/util" pb "github.com/hyperledger/fabric/protos" diff --git a/core/endorser/endorser_test.go b/core/endorser/endorser_test.go index 9777722336d..2588d2bfe03 100644 --- a/core/endorser/endorser_test.go +++ b/core/endorser/endorser_test.go @@ -30,7 +30,7 @@ import ( 
"github.com/hyperledger/fabric/core/container" "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" u "github.com/hyperledger/fabric/core/util" pb "github.com/hyperledger/fabric/protos" "github.com/spf13/viper" diff --git a/core/ledger/README.md b/core/ledger/README.md deleted file mode 100644 index 24d3fe3e580..00000000000 --- a/core/ledger/README.md +++ /dev/null @@ -1,17 +0,0 @@ -## Ledger Package - -This package implements the ledger, which includes the blockchain and global state. - -If you're looking for API to work with the blockchain or state, look in `ledger.go`. This is the file where all public functions are exposed and is extensively documented. The sections in the file are: - -### Transaction-batch functions - -These are functions that consensus should call. `BeginTxBatch` followed by `CommitTxBatch` or `RollbackTxBatch`. These functions will add a block to the blockchain with the specified transactions. - -### World-state functions - -These functions are used to modify the global state. They would generally be called by the VM based on requests from chaincode. - -### Blockchain functions - -These functions can be used to retrieve blocks/transactions from the blockchain or other information such as the blockchain size. Addition of blocks to the blockchain is done though the transaction-batch related functions. diff --git a/core/ledgernext/ReadWriteSet.md b/core/ledger/ReadWriteSet.md similarity index 100% rename from core/ledgernext/ReadWriteSet.md rename to core/ledger/ReadWriteSet.md diff --git a/core/ledger/benchmark_scripts/buckettree/buckettree.sh b/core/ledger/benchmark_scripts/buckettree/buckettree.sh deleted file mode 100755 index c11b8ab7109..00000000000 --- a/core/ledger/benchmark_scripts/buckettree/buckettree.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -source ../common.sh - -PKG_PATH="github.com/hyperledger/fabric/core/ledger/statemgmt/buckettree" -FUNCTION_NAME="BenchmarkStateHash" -NUM_CPUS=1 -CHART_DATA_COLUMN="NUM EXISTING KEYS" -export PEER_LEDGER_TEST_LOADYAML=false - -function runTest { - OUTPUT_DIR="$FUNCTION_NAME/${NumBuckets}_${KVSize}" - DB_DIR="$FUNCTION_NAME/${NumBuckets}_${KVSize}" - TEST_PARAMS="-NumBuckets=$NumBuckets,\ - -MaxGroupingAtEachLevel=$MaxGroupingAtEachLevel,\ - -ChaincodeIDPrefix=$ChaincodeIDPrefix,\ - -NumChaincodes=$NumChaincodes,\ - -MaxKeySuffix=$MaxKeySuffix,\ - -NumKeysToInsert=$NumKeysToInsert,\ - -KVSize=$KVSize" - - setupAndCompileTest - - for i in `seq 0 999`; do - EXISTING_KEYS_IN_DB=$(($i*$NumKeysToInsert)) - echo "executing with existing keys=$EXISTING_KEYS_IN_DB" - CHART_COLUMN_VALUE=$EXISTING_KEYS_IN_DB - executeTest - done - - ADDITIONAL_TEST_FLAGS="-test.cpuprofile=cpu.out -test.outputdir=`getOuputDir`" - CHART_COLUMN_VALUE=$(($(($i+1))*$NumKeysToInsert)) - executeTest - constructChart -} - -##### TEST PARAMS -MaxGroupingAtEachLevel=5 -ChaincodeIDPrefix="chaincode" -NumChaincodes=5 -MaxKeySuffix=1000000 -NumKeysToInsert=1000 - -NumBuckets=1009;KVSize=20;runTest -NumBuckets=10009;KVSize=20;runTest -NumBuckets=100003;KVSize=20;runTest -NumBuckets=1000003;KVSize=20;runTest - -NumBuckets=1009;KVSize=50;runTest -NumBuckets=10009;KVSize=50;runTest -NumBuckets=100003;KVSize=50;runTest -NumBuckets=1000003;KVSize=50;runTest - -NumBuckets=1009;KVSize=100;runTest -NumBuckets=10009;KVSize=100;runTest -NumBuckets=100003;KVSize=100;runTest -NumBuckets=1000003;KVSize=100;runTest - 
-NumBuckets=1009;KVSize=300;runTest -NumBuckets=10009;KVSize=300;runTest -NumBuckets=100003;KVSize=300;runTest -NumBuckets=1000003;KVSize=300;runTest - -NumBuckets=1009;KVSize=500;runTest -NumBuckets=10009;KVSize=500;runTest -NumBuckets=100003;KVSize=500;runTest -NumBuckets=1000003;KVSize=500;runTest - -NumBuckets=1009;KVSize=1000;runTest -NumBuckets=10009;KVSize=1000;runTest -NumBuckets=100003;KVSize=1000;runTest -NumBuckets=1000003;KVSize=1000;runTest - -NumBuckets=1009;KVSize=2000;runTest -NumBuckets=10009;KVSize=2000;runTest -NumBuckets=100003;KVSize=2000;runTest -NumBuckets=1000003;KVSize=2000;runTest - -NumBuckets=1009;KVSize=5000;runTest -NumBuckets=10009;KVSize=5000;runTest -NumBuckets=100003;KVSize=5000;runTest -NumBuckets=1000003;KVSize=5000;runTest diff --git a/core/ledger/benchmark_scripts/buckettree/plot.pg b/core/ledger/benchmark_scripts/buckettree/plot.pg deleted file mode 100644 index ec014546243..00000000000 --- a/core/ledger/benchmark_scripts/buckettree/plot.pg +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/local/bin/gnuplot -reset - -# Chart specific settings -set ylabel "milli second" -set xlabel "Existing Data" -set title "Buckettree performance" - -# General settings -set key reverse Left outside -set grid -set terminal postscript dashed color -set style data linespoints - -# plot command -plot dataFile using 1:($2/1000000) title "time taken" diff --git a/core/ledger/benchmark_scripts/buckettree/plots/all.pg b/core/ledger/benchmark_scripts/buckettree/plots/all.pg deleted file mode 100644 index b5bdd0eef16..00000000000 --- a/core/ledger/benchmark_scripts/buckettree/plots/all.pg +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/local/bin/gnuplot -reset - -# Chart specific settings -set ylabel "milli second" -set xlabel "Approx number of existing keys" -set title "Buckettree performance" - -# General settings -#set key bottom center outside reverse box -set key left top reverse box Left -set key spacing 1 font ",9" -set grid -set terminal postscript enhanced color -#set style data linespoints -set style data lines - -# plot command -plot '../output1/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=1000", \ -'../output2/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=100", \ -'../output3/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=10", \ -'../output5/chart.dat' using 1:($2/1000000) title "NumBuckets=100003, MaxGroupingAtEachLevel=5, ValueSize=1000", \ -'../output4/chart.dat' using 1:($2/1000000) title "NumBuckets=1000003,MaxGroupingAtEachLevel=5, ValueSize=1000", \ -'../output8/chart.dat' using 1:($2/1000000) title "NumBuckets=1000003,MaxGroupingAtEachLevel=5, ValueSize=10" diff --git a/core/ledger/benchmark_scripts/common.sh b/core/ledger/benchmark_scripts/common.sh deleted file mode 100644 index 1f78cf35f97..00000000000 --- a/core/ledger/benchmark_scripts/common.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash - -set -e - -OUTPUT_DIR_ROOT=`echo ~/obc_perf/output` -DB_DIR_ROOT=`echo ~/obc_perf/db` -BINARY_DIR=`echo ~/obc_perf/bin` - -mkdir -p $OUTPUT_DIR_ROOT -mkdir -p $DB_DIR_ROOT -mkdir -p $BINARY_DIR - -BENCHMARK_OUTPUT_FILE="benchmark.out" -CHART_DATA_FILE="chart.dat" -CHART_FILE="chart.ps" - -regex="^Benchmark.*[[:blank:]]+[[:digit:]]+[[:blank:]]+([[:digit:]]+).*$" -function writeBenchmarkOutput { - #echo "$@" - outFile=$1 - benchmarkFile=$2 - paramValue=$3 - cmdOutput=$4 - echo "outFile=$outFile, benchmarkFile=$benchmarkFile, paramValue=$paramValue" 
- echo "Test Output Start:" - echo "$cmdOutput" - echo "Test Output Finish" - while read -r line; do - echo $line >> $outFile - if [[ $line =~ $regex ]]; then - benchmarkDataLine="$paramValue ${BASH_REMATCH[1]}" - echo $benchmarkDataLine >> $benchmarkFile - fi - done <<< "$cmdOutput" -} - -function setupAndCompileTest { - createOutputDir - configureDBPath - compileTest - writeBenchmarkHeader -} - -function compileTest { - cmd="go test $PKG_PATH -c -o `getBinaryFileName`" - `eval $cmd` -} - -function writeBenchmarkHeader { - outputDir=`getOuputDir` - echo "# `date`" >> $outputDir/$CHART_DATA_FILE - echo "# TEST_PARAMS $TEST_PARAMS" >> $outputDir/$CHART_DATA_FILE - echo "# $CHART_DATA_COLUMN | ns/ops" >> $outputDir/$CHART_DATA_FILE -} - -## Execute test and generate data file -function executeTest { - cmd="`getBinaryFileName` -testParams=\"$TEST_PARAMS\" -test.run=XXX -test.bench=$FUNCTION_NAME -test.cpu=$NUM_CPUS $ADDITIONAL_TEST_FLAGS $PKG_PATH" - outputDir=`getOuputDir` - dbDir=`getDBDir` - echo "" - echo "Executing test... [OUTPUT_DIR=$outputDir, DB_DIR=$dbDir]" - echo $cmd - cmdOutput=`eval $cmd` - writeBenchmarkOutput $outputDir/$BENCHMARK_OUTPUT_FILE $outputDir/$CHART_DATA_FILE $CHART_COLUMN_VALUE "$cmdOutput" -} - -function getBinaryFileName { - pkgName=$(basename $PKG_PATH) - echo "$BINARY_DIR/$pkgName.test" -} - -function getOuputDir { - pkgName=$(basename $PKG_PATH) - outputDir="$OUTPUT_DIR_ROOT/$pkgName/$FUNCTION_NAME" - if [ ! -z "$OUTPUT_DIR" ]; then - outputDir="$OUTPUT_DIR_ROOT/$pkgName/$OUTPUT_DIR" - fi - echo $outputDir -} - -function getDBDir { - pkgName=$(basename $PKG_PATH) - dbDir="$DB_DIR_ROOT/$pkgName/$FUNCTION_NAME" - if [ ! -z "$DB_DIR" ]; then - dbDir="$DB_DIR_ROOT/$pkgName/$DB_DIR" - fi - echo $dbDir -} - -function createOutputDir { - outputDir=`getOuputDir` - if [ ! -d "$outputDir" ]; then - mkdir -p $outputDir - else - echo "INFO: outputDIR [$outputDir] already exists. Output will be appended to existing file" - fi -} - -function configureDBPath { - dbDir=`getDBDir` - if [ -d "$dbDir" ]; then - echo "INFO: dbDir [$dbDir] already exists. Data will be merged in the existing data" - fi - ulimit -n 10000 - echo "setting ulimit=`ulimit -n`" - export PEER_FILESYSTEMPATH="$dbDir" -} - -function constructChart { - outputDir=`getOuputDir` - gnuplot -e "dataFile='$outputDir/$CHART_DATA_FILE'" plot.pg > $outputDir/$CHART_FILE -} - -function openChart { - outputDir=`getOuputDir` - open "$outputDir/$CHART_FILE" -} - -function clearOSCache { - platform=`uname` - if [[ $platform == 'Darwin' ]]; then - echo "Clearing os cache" - sudo purge - else - echo "WARNING: Platform [$platform] is not supported for clearing os cache." - fi -} diff --git a/core/ledger/benchmark_scripts/ledger/db.sh b/core/ledger/benchmark_scripts/ledger/db.sh deleted file mode 100755 index 26138e2ff23..00000000000 --- a/core/ledger/benchmark_scripts/ledger/db.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -source ../common.sh - -PKG_PATH="github.com/hyperledger/fabric/core/ledger" -FUNCTION_NAME="BenchmarkDB" -NUM_CPUS=1 -CHART_DATA_COLUMN="Number of Bytes" - -setupAndCompileTest - -KVSize=1000 -MaxKeySuffix=1000000 -KeyPrefix=ChaincodeKey_ - -CHART_COLUMN_VALUE=$KVSize - -## now populate the db with 'MaxKeySuffix' number of key-values -TEST_PARAMS="-KVSize=$KVSize, -PopulateDB=true, -MaxKeySuffix=$MaxKeySuffix, -KeyPrefix=$KeyPrefix" -executeTest - -# now perform random access test. 
If you want to perform the random access test -TEST_PARAMS="-KVSize=$KVSize, -PopulateDB=false, -MaxKeySuffix=$MaxKeySuffix, -KeyPrefix=$KeyPrefix" -executeTest - -# now perform random access test after clearing OS file-system cache. If you want to perform the random access test -clearOSCache -executeTest diff --git a/core/ledger/benchmark_scripts/ledger/randomTransactions.sh b/core/ledger/benchmark_scripts/ledger/randomTransactions.sh deleted file mode 100755 index f491229cba4..00000000000 --- a/core/ledger/benchmark_scripts/ledger/randomTransactions.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -source ../common.sh - - -PKG_PATH="github.com/hyperledger/fabric/core/ledger" -NUM_CPUS=4 -CHART_DATA_COLUMN="KVSize" - -compileTest -OUTPUT_DIR="BenchmarkLedgerRandomTransactions" -createOutputDir -CHART_DATA_COLUMN="TEST_PARAMS" -writeBenchmarkHeader - -function populateDB { - FUNCTION_NAME="BenchmarkLedgerPopulate" - TEST_PARAMS="-KeyPrefix=$KeyPrefix, -KVSize=$KVSize, -BatchSize=$BatchSize, -MaxKeySuffix=$MaxKeySuffix" - CHART_COLUMN_VALUE="POPULATE_DB:Type=$LEDGER_STATE_DATASTRUCTURE_NAME:KeyPrefix=$KeyPrefix:KVSize=$KVSize:BatchSize=$BatchSize:MaxKeySuffix=$MaxKeySuffix:TestNumber=$TestNumber" - executeTest -} - -function runRandomTransactions { - FUNCTION_NAME="BenchmarkLedgerRandomTransactions" - TEST_PARAMS="-KeyPrefix=$KeyPrefix, -KVSize=$KVSize, -BatchSize=$BatchSize, -MaxKeySuffix=$MaxKeySuffix, -NumBatches=$NumBatches, -NumReadsFromLedger=$NumReadsFromLedger, -NumWritesToLedger=$NumWritesToLedger" - CHART_COLUMN_VALUE="RANDOM_TRANSACTION_EXE:Type=$LEDGER_STATE_DATASTRUCTURE_NAME:KeyPrefix=$KeyPrefix:KVSize=$KVSize:BatchSize=$BatchSize:MaxKeySuffix=$MaxKeySuffix:NumBatches=$NumBatches:NumReadsFromLedger=$NumReadsFromLedger:NumWritesToLedger=$NumWritesToLedger:TestNumber=$TestNumber" - executeTest -} - -function initDBPath { - DB_DIR="BenchmarkLedgerRandomTransactions/TestNumber=$TestNumber" - configureDBPath -} - -function runTest { - initDBPath - populateDB - if [ "$CLEAR_OS_CACHE" == "true" ]; then - clearOSCache - fi - runRandomTransactions -} - -KeyPrefix=key_ -MaxKeySuffix=1000000 - -export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree" - -# Before performing any of the following tests, manually delete the following folders from previous runs (if any) -# ~/obc_perf/db (Contains the db from the test run) -# ~/obc_perf/output/ledger (Contains the output from the test run) - -################## Measure the effect of bucket-cache START ############################ -# For enabling cache - -# 1) Change the value of 0 of 'bucketCacheSize' in test.yaml to 100 -# 2) Comment the following three lines and uncomment the next three lines - -TestNumber=1;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -TestNumber=2;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -TestNumber=3;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest - -#TestNumber=4;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -#TestNumber=5;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -#TestNumber=6;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest -################## Measure the effect of bucket-cache END ############################ - -: ' -################### Compare with raw state implementation START ############################ -CLEAR_OS_CACHE=false -export 
LEDGER_STATE_DATASTRUCTURE_NAME="raw" -TestNumber=1;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -TestNumber=2;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -TestNumber=3;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest - -export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree" -TestNumber=4;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -TestNumber=5;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -TestNumber=6;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest - -CLEAR_OS_CACHE=true -export LEDGER_STATE_DATASTRUCTURE_NAME="raw" -TestNumber=7;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -TestNumber=8;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -TestNumber=9;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest - -export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree" -TestNumber=10;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest -TestNumber=11;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest -TestNumber=12;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest -################### Compare with raw state implementation END ############################ -' diff --git a/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh b/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh deleted file mode 100755 index b5eef2b6e1d..00000000000 --- a/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -source ../common.sh - -PKG_PATH="github.com/hyperledger/fabric/core/ledger" -FUNCTION_NAME="BenchmarkLedgerSingleKeyTransaction" -NUM_CPUS=4 -CHART_DATA_COLUMN="Number of Bytes" - -setupAndCompileTest - -Key=key -KVSize=100 -BatchSize=100 -NumBatches=10000 -NumWritesToLedger=2 - -CHART_COLUMN_VALUE=$KVSize - -TEST_PARAMS="-Key=$Key, -KVSize=$KVSize, -BatchSize=$BatchSize, -NumBatches=$NumBatches, -NumWritesToLedger=$NumWritesToLedger" -executeTest diff --git a/core/ledger/benchmark_scripts/ledger/test.yaml b/core/ledger/benchmark_scripts/ledger/test.yaml deleted file mode 100644 index 37a75609914..00000000000 --- a/core/ledger/benchmark_scripts/ledger/test.yaml +++ /dev/null @@ -1,14 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - -ledger: - state: - dataStructure: - configs: - numBuckets: 1000003 - maxGroupingAtEachLevel: 5 - bucketCacheSize: 100 diff --git a/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh b/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh deleted file mode 100755 index 4c467f6de00..00000000000 --- a/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -source ../common.sh - -PKG_PATH="github.com/hyperledger/fabric/core/ledger/statemgmt" -FUNCTION_NAME="BenchmarkCryptoHash" -NUM_CPUS=1 -CHART_DATA_COLUMN="Number of Bytes" - -setupAndCompileTest - -for i in 1 5 10 20 50 100 200 400 600 800 1000 2000 5000 10000 20000 50000 100000; do - TEST_PARAMS="-NumBytes=$i" - CHART_COLUMN_VALUE=$i - executeTest -done - -constructChart diff --git 
a/core/ledger/benchmark_scripts/statemgmt/plot.pg b/core/ledger/benchmark_scripts/statemgmt/plot.pg deleted file mode 100644 index d8740857280..00000000000 --- a/core/ledger/benchmark_scripts/statemgmt/plot.pg +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/local/bin/gnuplot -reset - -# Chart specific settings -set ylabel "nano second" -set xlabel "Number of bytes" -set title "CryptoHash performance" -set logscale xy 10 - -# General settings -set key reverse Left outside -set grid -set terminal postscript dashed color -set style data linespoints - -# plot command -plot dataFile using 1:2 title "time taken", \ -"" using 1:($2/$1) title "time taken per byte" diff --git a/core/ledgernext/blkstorage/blockstorage.go b/core/ledger/blkstorage/blockstorage.go similarity index 95% rename from core/ledgernext/blkstorage/blockstorage.go rename to core/ledger/blkstorage/blockstorage.go index 8d17c44ef17..adb7e0201f6 100644 --- a/core/ledgernext/blkstorage/blockstorage.go +++ b/core/ledger/blkstorage/blockstorage.go @@ -17,7 +17,7 @@ limitations under the License. package blkstorage import ( - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/blkstorage/fsblkstorage/block_stream.go b/core/ledger/blkstorage/fsblkstorage/block_stream.go similarity index 100% rename from core/ledgernext/blkstorage/fsblkstorage/block_stream.go rename to core/ledger/blkstorage/fsblkstorage/block_stream.go diff --git a/core/ledgernext/blkstorage/fsblkstorage/block_stream_test.go b/core/ledger/blkstorage/fsblkstorage/block_stream_test.go similarity index 98% rename from core/ledgernext/blkstorage/fsblkstorage/block_stream_test.go rename to core/ledger/blkstorage/fsblkstorage/block_stream_test.go index d8ad17e7bb6..84780db6fc7 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/block_stream_test.go +++ b/core/ledger/blkstorage/fsblkstorage/block_stream_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) func TestBlockfileStream(t *testing.T) { diff --git a/core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr.go b/core/ledger/blkstorage/fsblkstorage/blockfile_mgr.go similarity index 99% rename from core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr.go rename to core/ledger/blkstorage/fsblkstorage/blockfile_mgr.go index 25a1447a55a..0c23260c32f 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr.go +++ b/core/ledger/blkstorage/fsblkstorage/blockfile_mgr.go @@ -21,8 +21,8 @@ import ( "sync/atomic" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext/util" - "github.com/hyperledger/fabric/core/ledgernext/util/db" + "github.com/hyperledger/fabric/core/ledger/util" + "github.com/hyperledger/fabric/core/ledger/util/db" "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" "github.com/tecbot/gorocksdb" diff --git a/core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr_test.go b/core/ledger/blkstorage/fsblkstorage/blockfile_mgr_test.go similarity index 99% rename from core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr_test.go rename to core/ledger/blkstorage/fsblkstorage/blockfile_mgr_test.go index e43da18cb0e..800cabe37f1 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr_test.go +++ b/core/ledger/blkstorage/fsblkstorage/blockfile_mgr_test.go @@ -21,7 +21,7 @@ import ( "testing" 
"github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/blkstorage/fsblkstorage/blockfile_rw.go b/core/ledger/blkstorage/fsblkstorage/blockfile_rw.go similarity index 100% rename from core/ledgernext/blkstorage/fsblkstorage/blockfile_rw.go rename to core/ledger/blkstorage/fsblkstorage/blockfile_rw.go diff --git a/core/ledgernext/blkstorage/fsblkstorage/blockindex.go b/core/ledger/blkstorage/fsblkstorage/blockindex.go similarity index 98% rename from core/ledgernext/blkstorage/fsblkstorage/blockindex.go rename to core/ledger/blkstorage/fsblkstorage/blockindex.go index 97978ee2ddc..9a989d524fd 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/blockindex.go +++ b/core/ledger/blkstorage/fsblkstorage/blockindex.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext/util" - "github.com/hyperledger/fabric/core/ledgernext/util/db" + "github.com/hyperledger/fabric/core/ledger/util" + "github.com/hyperledger/fabric/core/ledger/util/db" "github.com/tecbot/gorocksdb" ) diff --git a/core/ledgernext/blkstorage/fsblkstorage/blockindex_test.go b/core/ledger/blkstorage/fsblkstorage/blockindex_test.go similarity index 98% rename from core/ledgernext/blkstorage/fsblkstorage/blockindex_test.go rename to core/ledger/blkstorage/fsblkstorage/blockindex_test.go index bae867cfb0e..68186ce9fb2 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/blockindex_test.go +++ b/core/ledger/blkstorage/fsblkstorage/blockindex_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) type noopIndex struct { diff --git a/core/ledgernext/blkstorage/fsblkstorage/blocks_itr.go b/core/ledger/blkstorage/fsblkstorage/blocks_itr.go similarity index 97% rename from core/ledgernext/blkstorage/fsblkstorage/blocks_itr.go rename to core/ledger/blkstorage/fsblkstorage/blocks_itr.go index a7c38867303..742bb559178 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/blocks_itr.go +++ b/core/ledger/blkstorage/fsblkstorage/blocks_itr.go @@ -19,7 +19,7 @@ package fsblkstorage import ( "fmt" - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/blkstorage/fsblkstorage/config.go b/core/ledger/blkstorage/fsblkstorage/config.go similarity index 100% rename from core/ledgernext/blkstorage/fsblkstorage/config.go rename to core/ledger/blkstorage/fsblkstorage/config.go diff --git a/core/ledgernext/blkstorage/fsblkstorage/fs_blockstore.go b/core/ledger/blkstorage/fsblkstorage/fs_blockstore.go similarity index 97% rename from core/ledgernext/blkstorage/fsblkstorage/fs_blockstore.go rename to core/ledger/blkstorage/fsblkstorage/fs_blockstore.go index 33013bb160b..4614f7c0baa 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/fs_blockstore.go +++ b/core/ledger/blkstorage/fsblkstorage/fs_blockstore.go @@ -17,7 +17,7 @@ limitations under the License. 
package fsblkstorage import ( - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/blkstorage/fsblkstorage/pkg_test.go b/core/ledger/blkstorage/fsblkstorage/pkg_test.go similarity index 94% rename from core/ledgernext/blkstorage/fsblkstorage/pkg_test.go rename to core/ledger/blkstorage/fsblkstorage/pkg_test.go index c6d68612349..40bf7addefc 100644 --- a/core/ledgernext/blkstorage/fsblkstorage/pkg_test.go +++ b/core/ledger/blkstorage/fsblkstorage/pkg_test.go @@ -21,7 +21,7 @@ import ( "os" "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" "github.com/hyperledger/fabric/protos" ) @@ -30,7 +30,7 @@ type testEnv struct { } func newTestEnv(t testing.TB) *testEnv { - conf := NewConf("/tmp/tests/ledgernext/blkstorage/fsblkstorage", 0) + conf := NewConf("/tmp/tests/ledger/blkstorage/fsblkstorage", 0) os.RemoveAll(conf.dbPath) os.RemoveAll(conf.blockfilesDir) return &testEnv{conf} diff --git a/core/ledger/blockchain.go b/core/ledger/blockchain.go deleted file mode 100644 index 6515d3dd2f6..00000000000 --- a/core/ledger/blockchain.go +++ /dev/null @@ -1,333 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ledger - -import ( - "bytes" - "encoding/binary" - "strconv" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/util" - "github.com/hyperledger/fabric/protos" - "github.com/tecbot/gorocksdb" - "golang.org/x/net/context" -) - -// Blockchain holds basic information in memory. 
Operations on Blockchain are not thread-safe -// TODO synchronize access to in-memory variables -type blockchain struct { - size uint64 - previousBlockHash []byte - indexer blockchainIndexer - lastProcessedBlock *lastProcessedBlock -} - -type lastProcessedBlock struct { - block *protos.Block - blockNumber uint64 - blockHash []byte -} - -var indexBlockDataSynchronously = true - -func newBlockchain() (*blockchain, error) { - size, err := fetchBlockchainSizeFromDB() - if err != nil { - return nil, err - } - blockchain := &blockchain{0, nil, nil, nil} - blockchain.size = size - if size > 0 { - previousBlock, err := fetchBlockFromDB(size - 1) - if err != nil { - return nil, err - } - previousBlockHash, err := previousBlock.GetHash() - if err != nil { - return nil, err - } - blockchain.previousBlockHash = previousBlockHash - } - - err = blockchain.startIndexer() - if err != nil { - return nil, err - } - return blockchain, nil -} - -func (blockchain *blockchain) startIndexer() (err error) { - if indexBlockDataSynchronously { - blockchain.indexer = newBlockchainIndexerSync() - } else { - blockchain.indexer = newBlockchainIndexerAsync() - } - err = blockchain.indexer.start(blockchain) - return -} - -// getLastBlock get last block in blockchain -func (blockchain *blockchain) getLastBlock() (*protos.Block, error) { - if blockchain.size == 0 { - return nil, nil - } - return blockchain.getBlock(blockchain.size - 1) -} - -// getSize number of blocks in blockchain -func (blockchain *blockchain) getSize() uint64 { - return blockchain.size -} - -// getBlock get block at arbitrary height in block chain -func (blockchain *blockchain) getBlock(blockNumber uint64) (*protos.Block, error) { - return fetchBlockFromDB(blockNumber) -} - -// getBlockByHash get block by block hash -func (blockchain *blockchain) getBlockByHash(blockHash []byte) (*protos.Block, error) { - blockNumber, err := blockchain.indexer.fetchBlockNumberByBlockHash(blockHash) - if err != nil { - return nil, err - } - return blockchain.getBlock(blockNumber) -} - -func (blockchain *blockchain) getTransactionByID(txID string) (*protos.Transaction, error) { - blockNumber, txIndex, err := blockchain.indexer.fetchTransactionIndexByID(txID) - if err != nil { - return nil, err - } - block, err := blockchain.getBlock(blockNumber) - if err != nil { - return nil, err - } - transaction := block.GetTransactions()[txIndex] - return transaction, nil -} - -// getTransactions get all transactions in a block identified by block number -func (blockchain *blockchain) getTransactions(blockNumber uint64) ([]*protos.Transaction, error) { - block, err := blockchain.getBlock(blockNumber) - if err != nil { - return nil, err - } - return block.GetTransactions(), nil -} - -// getTransactionsByBlockHash get all transactions in a block identified by block hash -func (blockchain *blockchain) getTransactionsByBlockHash(blockHash []byte) ([]*protos.Transaction, error) { - block, err := blockchain.getBlockByHash(blockHash) - if err != nil { - return nil, err - } - return block.GetTransactions(), nil -} - -// getTransaction get a transaction identified by block number and index within the block -func (blockchain *blockchain) getTransaction(blockNumber uint64, txIndex uint64) (*protos.Transaction, error) { - block, err := blockchain.getBlock(blockNumber) - if err != nil { - return nil, err - } - return block.GetTransactions()[txIndex], nil -} - -// getTransactionByBlockHash get a transaction identified by block hash and index within the block -func (blockchain *blockchain) 
getTransactionByBlockHash(blockHash []byte, txIndex uint64) (*protos.Transaction, error) { - block, err := blockchain.getBlockByHash(blockHash) - if err != nil { - return nil, err - } - return block.GetTransactions()[txIndex], nil -} - -func (blockchain *blockchain) getBlockchainInfo() (*protos.BlockchainInfo, error) { - if blockchain.getSize() == 0 { - return &protos.BlockchainInfo{Height: 0}, nil - } - - lastBlock, err := blockchain.getLastBlock() - if err != nil { - return nil, err - } - - info := blockchain.getBlockchainInfoForBlock(blockchain.getSize(), lastBlock) - return info, nil -} - -func (blockchain *blockchain) getBlockchainInfoForBlock(height uint64, block *protos.Block) *protos.BlockchainInfo { - hash, _ := block.GetHash() - info := &protos.BlockchainInfo{ - Height: height, - CurrentBlockHash: hash, - PreviousBlockHash: block.PreviousBlockHash} - - return info -} - -func (blockchain *blockchain) buildBlock(block *protos.Block, stateHash []byte) *protos.Block { - block.SetPreviousBlockHash(blockchain.previousBlockHash) - block.StateHash = stateHash - return block -} - -func (blockchain *blockchain) addPersistenceChangesForNewBlock(ctx context.Context, - block *protos.Block, stateHash []byte, writeBatch *gorocksdb.WriteBatch) (uint64, error) { - block = blockchain.buildBlock(block, stateHash) - if block.NonHashData == nil { - block.NonHashData = &protos.NonHashData{LocalLedgerCommitTimestamp: util.CreateUtcTimestamp()} - } else { - block.NonHashData.LocalLedgerCommitTimestamp = util.CreateUtcTimestamp() - } - blockNumber := blockchain.size - blockHash, err := block.GetHash() - if err != nil { - return 0, err - } - blockBytes, blockBytesErr := block.Bytes() - if blockBytesErr != nil { - return 0, blockBytesErr - } - writeBatch.PutCF(db.GetDBHandle().BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes) - writeBatch.PutCF(db.GetDBHandle().BlockchainCF, blockCountKey, encodeUint64(blockNumber+1)) - if blockchain.indexer.isSynchronous() { - blockchain.indexer.createIndexes(block, blockNumber, blockHash, writeBatch) - } - blockchain.lastProcessedBlock = &lastProcessedBlock{block, blockNumber, blockHash} - return blockNumber, nil -} - -func (blockchain *blockchain) blockPersistenceStatus(success bool) { - if success { - blockchain.size++ - blockchain.previousBlockHash = blockchain.lastProcessedBlock.blockHash - if !blockchain.indexer.isSynchronous() { - writeBatch := gorocksdb.NewWriteBatch() - defer writeBatch.Destroy() - blockchain.indexer.createIndexes(blockchain.lastProcessedBlock.block, - blockchain.lastProcessedBlock.blockNumber, blockchain.lastProcessedBlock.blockHash, writeBatch) - } - } - blockchain.lastProcessedBlock = nil -} - -func (blockchain *blockchain) persistRawBlock(block *protos.Block, blockNumber uint64) error { - blockBytes, blockBytesErr := block.Bytes() - if blockBytesErr != nil { - return blockBytesErr - } - writeBatch := gorocksdb.NewWriteBatch() - defer writeBatch.Destroy() - writeBatch.PutCF(db.GetDBHandle().BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes) - - blockHash, err := block.GetHash() - if err != nil { - return err - } - - // Need to check as we support out of order blocks in cases such as block/state synchronization. This is - // real blockchain height, not size. 
- if blockchain.getSize() < blockNumber+1 { - sizeBytes := encodeUint64(blockNumber + 1) - writeBatch.PutCF(db.GetDBHandle().BlockchainCF, blockCountKey, sizeBytes) - blockchain.size = blockNumber + 1 - blockchain.previousBlockHash = blockHash - } - - if blockchain.indexer.isSynchronous() { - blockchain.indexer.createIndexes(block, blockNumber, blockHash, writeBatch) - } - - opt := gorocksdb.NewDefaultWriteOptions() - defer opt.Destroy() - err = db.GetDBHandle().DB.Write(opt, writeBatch) - if err != nil { - return err - } - return nil -} - -func fetchBlockFromDB(blockNumber uint64) (*protos.Block, error) { - blockBytes, err := db.GetDBHandle().GetFromBlockchainCF(encodeBlockNumberDBKey(blockNumber)) - if err != nil { - return nil, err - } - if blockBytes == nil { - return nil, nil - } - return protos.UnmarshallBlock(blockBytes) -} - -func fetchBlockchainSizeFromDB() (uint64, error) { - bytes, err := db.GetDBHandle().GetFromBlockchainCF(blockCountKey) - if err != nil { - return 0, err - } - if bytes == nil { - return 0, nil - } - return decodeToUint64(bytes), nil -} - -func fetchBlockchainSizeFromSnapshot(snapshot *gorocksdb.Snapshot) (uint64, error) { - blockNumberBytes, err := db.GetDBHandle().GetFromBlockchainCFSnapshot(snapshot, blockCountKey) - if err != nil { - return 0, err - } - var blockNumber uint64 - if blockNumberBytes != nil { - blockNumber = decodeToUint64(blockNumberBytes) - } - return blockNumber, nil -} - -var blockCountKey = []byte("blockCount") - -func encodeBlockNumberDBKey(blockNumber uint64) []byte { - return encodeUint64(blockNumber) -} - -func encodeUint64(number uint64) []byte { - bytes := make([]byte, 8) - binary.BigEndian.PutUint64(bytes, number) - return bytes -} - -func decodeToUint64(bytes []byte) uint64 { - return binary.BigEndian.Uint64(bytes) -} - -func (blockchain *blockchain) String() string { - var buffer bytes.Buffer - size := blockchain.getSize() - for i := uint64(0); i < size; i++ { - block, blockErr := blockchain.getBlock(i) - if blockErr != nil { - return "" - } - buffer.WriteString("\n--------------------\n") - buffer.WriteString(block.String()) - buffer.WriteString("\n----------<\\block #") - buffer.WriteString(strconv.FormatUint(i, 10)) - buffer.WriteString(">----------\n") - } - return buffer.String() -} diff --git a/core/ledger/blockchain_indexes.go b/core/ledger/blockchain_indexes.go deleted file mode 100644 index 526393993b8..00000000000 --- a/core/ledger/blockchain_indexes.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ledger - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/protos" - "github.com/op/go-logging" - "github.com/tecbot/gorocksdb" -) - -var indexLogger = logging.MustGetLogger("indexes") -var prefixBlockHashKey = byte(1) -var prefixTxIDKey = byte(2) -var prefixAddressBlockNumCompositeKey = byte(3) - -type blockchainIndexer interface { - isSynchronous() bool - start(blockchain *blockchain) error - createIndexes(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error - fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) - fetchTransactionIndexByID(txID string) (uint64, uint64, error) - stop() -} - -// Implementation for sync indexer -type blockchainIndexerSync struct { -} - -func newBlockchainIndexerSync() *blockchainIndexerSync { - return &blockchainIndexerSync{} -} - -func (indexer *blockchainIndexerSync) isSynchronous() bool { - return true -} - -func (indexer *blockchainIndexerSync) start(blockchain *blockchain) error { - return nil -} - -func (indexer *blockchainIndexerSync) createIndexes( - block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error { - return addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch) -} - -func (indexer *blockchainIndexerSync) fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) { - return fetchBlockNumberByBlockHashFromDB(blockHash) -} - -func (indexer *blockchainIndexerSync) fetchTransactionIndexByID(txID string) (uint64, uint64, error) { - return fetchTransactionIndexByIDFromDB(txID) -} - -func (indexer *blockchainIndexerSync) stop() { - return -} - -// Functions for persisting and retrieving index data -func addIndexDataForPersistence(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error { - openchainDB := db.GetDBHandle() - cf := openchainDB.IndexesCF - - // add blockhash -> blockNumber - indexLogger.Debugf("Indexing block number [%d] by hash = [%x]", blockNumber, blockHash) - writeBatch.PutCF(cf, encodeBlockHashKey(blockHash), encodeBlockNumber(blockNumber)) - - addressToTxIndexesMap := make(map[string][]uint64) - addressToChaincodeIDsMap := make(map[string][]*protos.ChaincodeID) - - transactions := block.GetTransactions() - for txIndex, tx := range transactions { - // add TxID -> (blockNumber,indexWithinBlock) - writeBatch.PutCF(cf, encodeTxIDKey(tx.Txid), encodeBlockNumTxIndex(blockNumber, uint64(txIndex))) - - txExecutingAddress := getTxExecutingAddress(tx) - addressToTxIndexesMap[txExecutingAddress] = append(addressToTxIndexesMap[txExecutingAddress], uint64(txIndex)) - - switch tx.Type { - case protos.Transaction_CHAINCODE_DEPLOY, protos.Transaction_CHAINCODE_INVOKE: - authroizedAddresses, chaincodeID := getAuthorisedAddresses(tx) - for _, authroizedAddress := range authroizedAddresses { - addressToChaincodeIDsMap[authroizedAddress] = append(addressToChaincodeIDsMap[authroizedAddress], chaincodeID) - } - } - } - for address, txsIndexes := range addressToTxIndexesMap { - writeBatch.PutCF(cf, encodeAddressBlockNumCompositeKey(address, blockNumber), encodeListTxIndexes(txsIndexes)) - } - return nil -} - -func fetchBlockNumberByBlockHashFromDB(blockHash []byte) (uint64, error) { - indexLogger.Debugf("fetchBlockNumberByBlockHashFromDB() for blockhash [%x]", blockHash) - blockNumberBytes, err := db.GetDBHandle().GetFromIndexesCF(encodeBlockHashKey(blockHash)) - if err != nil { - return 0, err - } - 
indexLogger.Debugf("blockNumberBytes for blockhash [%x] is [%x]", blockHash, blockNumberBytes) - if len(blockNumberBytes) == 0 { - return 0, newLedgerError(ErrorTypeBlockNotFound, fmt.Sprintf("No block indexed with block hash [%x]", blockHash)) - } - blockNumber := decodeBlockNumber(blockNumberBytes) - return blockNumber, nil -} - -func fetchTransactionIndexByIDFromDB(txID string) (uint64, uint64, error) { - blockNumTxIndexBytes, err := db.GetDBHandle().GetFromIndexesCF(encodeTxIDKey(txID)) - if err != nil { - return 0, 0, err - } - if blockNumTxIndexBytes == nil { - return 0, 0, ErrResourceNotFound - } - return decodeBlockNumTxIndex(blockNumTxIndexBytes) -} - -func getTxExecutingAddress(tx *protos.Transaction) string { - // TODO Fetch address form tx - return "address1" -} - -func getAuthorisedAddresses(tx *protos.Transaction) ([]string, *protos.ChaincodeID) { - // TODO fetch address from chaincode deployment tx - // TODO this method should also return error - data := tx.ChaincodeID - cID := &protos.ChaincodeID{} - err := proto.Unmarshal(data, cID) - if err != nil { - return nil, nil - } - return []string{"address1", "address2"}, cID -} - -// functions for encoding/decoding db keys/values for index data -// encode / decode BlockNumber -func encodeBlockNumber(blockNumber uint64) []byte { - return proto.EncodeVarint(blockNumber) -} - -func decodeBlockNumber(blockNumberBytes []byte) (blockNumber uint64) { - blockNumber, _ = proto.DecodeVarint(blockNumberBytes) - return -} - -// encode / decode BlockNumTxIndex -func encodeBlockNumTxIndex(blockNumber uint64, txIndexInBlock uint64) []byte { - b := proto.NewBuffer([]byte{}) - b.EncodeVarint(blockNumber) - b.EncodeVarint(txIndexInBlock) - return b.Bytes() -} - -func decodeBlockNumTxIndex(bytes []byte) (blockNum uint64, txIndex uint64, err error) { - b := proto.NewBuffer(bytes) - blockNum, err = b.DecodeVarint() - if err != nil { - return - } - txIndex, err = b.DecodeVarint() - if err != nil { - return - } - return -} - -// encode BlockHashKey -func encodeBlockHashKey(blockHash []byte) []byte { - return prependKeyPrefix(prefixBlockHashKey, blockHash) -} - -// encode TxIDKey -func encodeTxIDKey(txID string) []byte { - return prependKeyPrefix(prefixTxIDKey, []byte(txID)) -} - -func encodeAddressBlockNumCompositeKey(address string, blockNumber uint64) []byte { - b := proto.NewBuffer([]byte{prefixAddressBlockNumCompositeKey}) - b.EncodeRawBytes([]byte(address)) - b.EncodeVarint(blockNumber) - return b.Bytes() -} - -func encodeListTxIndexes(listTx []uint64) []byte { - b := proto.NewBuffer([]byte{}) - for i := range listTx { - b.EncodeVarint(listTx[i]) - } - return b.Bytes() -} - -func prependKeyPrefix(prefix byte, key []byte) []byte { - modifiedKey := []byte{} - modifiedKey = append(modifiedKey, prefix) - modifiedKey = append(modifiedKey, key...) - return modifiedKey -} diff --git a/core/ledger/blockchain_indexes_async.go b/core/ledger/blockchain_indexes_async.go deleted file mode 100644 index 53f1a1b178a..00000000000 --- a/core/ledger/blockchain_indexes_async.go +++ /dev/null @@ -1,324 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ledger - -import ( - "fmt" - "sync" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/protos" - "github.com/tecbot/gorocksdb" -) - -var lastIndexedBlockKey = []byte{byte(0)} - -type blockWrapper struct { - block *protos.Block - blockNumber uint64 - blockHash []byte - stopNow bool -} - -type blockchainIndexerAsync struct { - blockchain *blockchain - // Channel for transferring block from block chain for indexing - blockChan chan blockWrapper - indexerState *blockchainIndexerState -} - -func newBlockchainIndexerAsync() *blockchainIndexerAsync { - return new(blockchainIndexerAsync) -} - -func (indexer *blockchainIndexerAsync) isSynchronous() bool { - return false -} - -func (indexer *blockchainIndexerAsync) start(blockchain *blockchain) error { - indexer.blockchain = blockchain - indexerState, err := newBlockchainIndexerState(indexer) - if err != nil { - return err - } - indexer.indexerState = indexerState - indexLogger.Debugf("starting indexer, lastIndexedBlockNum = [%d]", - indexer.indexerState.getLastIndexedBlockNumber()) - - err = indexer.indexPendingBlocks() - if err != nil { - return err - } - indexLogger.Debugf("starting indexer, lastIndexedBlockNum = [%d] after processing pending blocks", - indexer.indexerState.getLastIndexedBlockNumber()) - indexer.blockChan = make(chan blockWrapper) - go func() { - for { - indexLogger.Debug("Going to wait on channel for next block to index") - blockWrapper := <-indexer.blockChan - - indexLogger.Debugf("Blockwrapper received on channel: block number = [%d]", blockWrapper.blockNumber) - - if blockWrapper.stopNow { - indexLogger.Debug("stop command received on channel") - indexer.blockChan <- blockWrapper - return - } - if indexer.indexerState.hasError() { - indexLogger.Debugf("Not indexing block number [%d]. Because of previous error: %s.", - blockWrapper.blockNumber, indexer.indexerState.getError()) - continue - } - - err := indexer.createIndexesInternal(blockWrapper.block, blockWrapper.blockNumber, blockWrapper.blockHash) - if err != nil { - indexer.indexerState.setError(err) - indexLogger.Debugf( - "Error occurred while indexing block number [%d]. Error: %s.
Further blocks will not be indexed", - blockWrapper.blockNumber, err) - - } else { - indexLogger.Debugf("Finished indexing block number [%d]", blockWrapper.blockNumber) - } - } - }() - return nil -} - -func (indexer *blockchainIndexerAsync) createIndexes(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error { - indexer.blockChan <- blockWrapper{block, blockNumber, blockHash, false} - return nil -} - -// createIndexes adds entries into db for creating indexes on various attributes -func (indexer *blockchainIndexerAsync) createIndexesInternal(block *protos.Block, blockNumber uint64, blockHash []byte) error { - openchainDB := db.GetDBHandle() - writeBatch := gorocksdb.NewWriteBatch() - defer writeBatch.Destroy() - addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch) - writeBatch.PutCF(openchainDB.IndexesCF, lastIndexedBlockKey, encodeBlockNumber(blockNumber)) - opt := gorocksdb.NewDefaultWriteOptions() - defer opt.Destroy() - err := openchainDB.DB.Write(opt, writeBatch) - if err != nil { - return err - } - indexer.indexerState.blockIndexed(blockNumber) - return nil -} - -func (indexer *blockchainIndexerAsync) fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) { - err := indexer.indexerState.checkError() - if err != nil { - indexLogger.Debug("Async indexer has a previous error. Returing the error") - return 0, err - } - indexer.indexerState.waitForLastCommittedBlock() - return fetchBlockNumberByBlockHashFromDB(blockHash) -} - -func (indexer *blockchainIndexerAsync) fetchTransactionIndexByID(txID string) (uint64, uint64, error) { - err := indexer.indexerState.checkError() - if err != nil { - return 0, 0, err - } - indexer.indexerState.waitForLastCommittedBlock() - return fetchTransactionIndexByIDFromDB(txID) -} - -func (indexer *blockchainIndexerAsync) indexPendingBlocks() error { - blockchain := indexer.blockchain - if blockchain.getSize() == 0 { - // chain is empty as yet - return nil - } - - lastCommittedBlockNum := blockchain.getSize() - 1 - lastIndexedBlockNum := indexer.indexerState.getLastIndexedBlockNumber() - zerothBlockIndexed := indexer.indexerState.isZerothBlockIndexed() - - indexLogger.Debugf("lastCommittedBlockNum=[%d], lastIndexedBlockNum=[%d], zerothBlockIndexed=[%t]", - lastCommittedBlockNum, lastIndexedBlockNum, zerothBlockIndexed) - - // block numbers use uint64 - so, 'lastIndexedBlockNum = 0' is ambiguous. 
- // So, explicitly checking whether zero-th block has been indexed - if !zerothBlockIndexed { - err := indexer.fetchBlockFromDBAndCreateIndexes(0) - if err != nil { - return err - } - } - - if lastCommittedBlockNum == lastIndexedBlockNum { - // all committed blocks are indexed - return nil - } - - for ; lastIndexedBlockNum < lastCommittedBlockNum; lastIndexedBlockNum++ { - blockNumToIndex := lastIndexedBlockNum + 1 - err := indexer.fetchBlockFromDBAndCreateIndexes(blockNumToIndex) - if err != nil { - return err - } - } - return nil -} - -func (indexer *blockchainIndexerAsync) fetchBlockFromDBAndCreateIndexes(blockNumber uint64) error { - blockchain := indexer.blockchain - blockToIndex, errBlockFetch := blockchain.getBlock(blockNumber) - if errBlockFetch != nil { - return errBlockFetch - } - - blockHash, errBlockHash := blockToIndex.GetHash() - if errBlockHash != nil { - return errBlockHash - } - indexer.createIndexesInternal(blockToIndex, blockNumber, blockHash) - return nil -} - -func (indexer *blockchainIndexerAsync) stop() { - indexer.indexerState.waitForLastCommittedBlock() - indexer.blockChan <- blockWrapper{nil, 0, nil, true} - <-indexer.blockChan - close(indexer.blockChan) -} - -// Code related to tracking the block number that has been indexed -// and if there has been an error in indexing a block -// Since, we index blocks asynchronously, there may be a case when -// a client query arrives before a block has been indexed. -// -// Do we really need strict semantics such that an index query results -// should include up to block number (or higher) that may have been committed -// when user query arrives? -// If a delay of a couple of blocks are allowed, we can get rid of this synchronization stuff -type blockchainIndexerState struct { - indexer *blockchainIndexerAsync - - zerothBlockIndexed bool - lastBlockIndexed uint64 - err error - lock *sync.RWMutex - newBlockIndexed *sync.Cond -} - -func newBlockchainIndexerState(indexer *blockchainIndexerAsync) (*blockchainIndexerState, error) { - var lock sync.RWMutex - zerothBlockIndexed, lastIndexedBlockNum, err := fetchLastIndexedBlockNumFromDB() - if err != nil { - return nil, err - } - return &blockchainIndexerState{indexer, zerothBlockIndexed, lastIndexedBlockNum, nil, &lock, sync.NewCond(&lock)}, nil -} - -func (indexerState *blockchainIndexerState) blockIndexed(blockNumber uint64) { - indexerState.newBlockIndexed.L.Lock() - defer indexerState.newBlockIndexed.L.Unlock() - indexerState.lastBlockIndexed = blockNumber - indexerState.zerothBlockIndexed = true - indexerState.newBlockIndexed.Broadcast() -} - -func (indexerState *blockchainIndexerState) getLastIndexedBlockNumber() uint64 { - indexerState.lock.RLock() - defer indexerState.lock.RUnlock() - return indexerState.lastBlockIndexed -} - -func (indexerState *blockchainIndexerState) isZerothBlockIndexed() bool { - indexerState.lock.RLock() - defer indexerState.lock.RUnlock() - return indexerState.zerothBlockIndexed -} - -func (indexerState *blockchainIndexerState) waitForLastCommittedBlock() error { - indexLogger.Debugf("waitForLastCommittedBlock() indexerState.err = %#v", indexerState.err) - chain := indexerState.indexer.blockchain - indexerState.lock.Lock() - defer indexerState.lock.Unlock() - if indexerState.err != nil { - return indexerState.err - } - - if chain.getSize() == 0 { - return nil - } - - lastBlockCommitted := chain.getSize() - 1 - - if !indexerState.zerothBlockIndexed { - indexLogger.Debugf( - "Waiting for zeroth block to be indexed. 
lastBlockCommitted=[%d] and lastBlockIndexed=[%d]", - lastBlockCommitted, indexerState.lastBlockIndexed) - indexerState.newBlockIndexed.Wait() - } - - for indexerState.lastBlockIndexed < lastBlockCommitted && indexerState.err == nil { - indexLogger.Debugf( - "Waiting for index to catch up with block chain. lastBlockCommitted=[%d] and lastBlockIndexed=[%d]", - lastBlockCommitted, indexerState.lastBlockIndexed) - indexerState.newBlockIndexed.Wait() - } - return indexerState.err -} - -func (indexerState *blockchainIndexerState) setError(err error) { - indexerState.lock.Lock() - defer indexerState.lock.Unlock() - indexerState.err = err - indexLogger.Debugf("setError() indexerState.err = %#v", indexerState.err) - indexerState.newBlockIndexed.Broadcast() -} - -func (indexerState *blockchainIndexerState) hasError() bool { - indexerState.lock.RLock() - defer indexerState.lock.RUnlock() - return indexerState.err != nil -} - -func (indexerState *blockchainIndexerState) getError() error { - indexerState.lock.RLock() - defer indexerState.lock.RUnlock() - return indexerState.err -} - -func (indexerState *blockchainIndexerState) checkError() error { - indexerState.lock.RLock() - defer indexerState.lock.RUnlock() - if indexerState.err != nil { - return fmt.Errorf( - "An error had occured during indexing block number [%d]. So, index is out of sync. Detail of the error = %s", - indexerState.getLastIndexedBlockNumber()+1, indexerState.err) - } - return indexerState.err -} - -func fetchLastIndexedBlockNumFromDB() (zerothBlockIndexed bool, lastIndexedBlockNum uint64, err error) { - lastIndexedBlockNumberBytes, err := db.GetDBHandle().GetFromIndexesCF(lastIndexedBlockKey) - if err != nil { - return - } - if lastIndexedBlockNumberBytes == nil { - return - } - lastIndexedBlockNum = decodeBlockNumber(lastIndexedBlockNumberBytes) - zerothBlockIndexed = true - return -} diff --git a/core/ledger/genesis/genesis.go b/core/ledger/genesis/genesis.go deleted file mode 100644 index 340125a8115..00000000000 --- a/core/ledger/genesis/genesis.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genesis - -import ( - "sync" - - "github.com/hyperledger/fabric/core/ledger" - "github.com/op/go-logging" -) - -var genesisLogger = logging.MustGetLogger("genesis") - -var makeGenesisError error -var once sync.Once - -// MakeGenesis creates the genesis block and adds it to the blockchain. 
-func MakeGenesis() error { - once.Do(func() { - ledger, err := ledger.GetLedger() - if err != nil { - makeGenesisError = err - return - } - - if ledger.GetBlockchainSize() == 0 { - genesisLogger.Info("Creating genesis block.") - if makeGenesisError = ledger.BeginTxBatch(0); makeGenesisError == nil { - makeGenesisError = ledger.CommitTxBatch(0, nil, nil, nil) - } - } - }) - return makeGenesisError -} diff --git a/core/ledger/genesis/genesis_test.yaml b/core/ledger/genesis/genesis_test.yaml deleted file mode 100644 index b78bbcc77fd..00000000000 --- a/core/ledger/genesis/genesis_test.yaml +++ /dev/null @@ -1,211 +0,0 @@ ---- -############################################################################### -# -# CLI section -# -############################################################################### -cli: - - # The address that the cli process will use for callbacks from chaincodes - address: 0.0.0.0:7052 - - - -############################################################################### -# -# REST section -# -############################################################################### -rest: - - # The address that the REST service will listen on for incoming requests. - address: 0.0.0.0:7050 - - - -############################################################################### -# -# Peer section -# -############################################################################### -peer: - - # Peer Version following version semantics as described here http://semver.org/ - # The Peer supplies this version in communications with other Peers - version: 0.1.0 - - # The Peer id is used for identifying this Peer instance. - id: jdoe - - # The privateKey to be used by this peer - privateKey: 794ef087680e2494fa4918fd8fb80fb284b50b57d321a31423fe42b9ccf6216047cea0b66fe8365a8e3f2a8140c6866cc45852e63124668bee1daa9c97da0c2a - - # The networkId allows for logical separation of networks - # networkId: dev - # networkId: test - networkId: dev - - # The Address this Peer will bind to for providing services - address: 0.0.0.0:7051 - # Whether the Peer should programmatically determine the address to bind to. This case is useful for docker containers. - addressAutoDetect: false - - - # Logging settings - logging: - # Logging level, can be one of [error|warning|info|debug] - # One of: CRITICAL | ERROR | WARNING | NOTICE | INFO | DEBUG - level: DEBUG - - # Peer port to accept connections on - port: 7051 - # Peer's setting for GOMAXPROCS - gomaxprocs: 2 - workers: 2 - - # Validator defines whether this peer is a validating peer or not, and if - # it is enabled, what consensus plugin to load - validator: - enabled: true - # Consensus plugin to use. The value is the name of the plugin; i.e. pbft, noops - consensus: noops - - # TLS Settings for p2p communications - tls: - enabled: false - cert: - file: testdata/server1.pem - key: - file: testdata/server1.key - # The server name used to verify the hostname returned by TLS handshake - serverhostoverride: - - # Peer discovery settings.
Controls how this peer discovers other peers - discovery: - - # The root nodes are used for bootstrapping purposes, and generally supplied through ENV variables - rootnode: - - # The duration of time between attempts to ask peers for their connected peers - period: 5s - - ## leaving this in for example of sub map entry - # testNodes: - # - node : 1 - # ip : 127.0.0.1 - # port : 7051 - # - node : 2 - # ip : 127.0.0.1 - # port : 7051 - - # Should the discovered nodes and their reputations - # be stored in DB and persisted between restarts - persist: true - - # if peer discovery is off - # the peer window will show - # only what is retrieved by active - # peer [true/false] - enabled: true - - # number of workers that - # tests the peers for being - # online [1..10] - workers: 8 - - # the period in seconds with which the discovery - # tries to reconnect to successful nodes - # 0 means the nodes are not reconnected - touchPeriod: 600 - - # the maximum number of nodes to reconnect to - # -1 for unlimited - touchMaxNodes: 100 - - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/genesis_test - -### NOTE: The validator section below is not needed and will be removed - BN -############################################################################### -# -# Validator section -# -############################################################################### -validator: - enabled: false - address: 0.0.0.0:7052 - # TLS Settings for p2p communications - tls: - enabled: false - cert: - file: testdata/server1.pem - key: - file: testdata/server1.key - # The server name used to verify the hostname returned by TLS handshake - serverhostoverride: - # Peer discovery settings. Controls how this peer discovers other peers - discovery: - - # The root nodes are used for bootstrapping purposes, and generally supplied through ENV variables - rootnode: - -############################################################################### -# -# VM section -# -############################################################################### -vm: - - # Endpoint of the vm management system. For docker can be one of the following in general - # unix:///var/run/docker.sock - # http://localhost:2375 - endpoint: unix:///var/run/docker.sock - - -############################################################################### -# -# Chaincode section -# -############################################################################### -chaincode: - - # The id is used by the Chaincode stub to register the executing ChaincodeID with the Peer and is generally supplied through ENV variables - id: - url: - version: - - golang: - - # This is the basis for the Golang Dockerfile. Additional commands will be appended dependent upon the chaincode specification.
- Dockerfile: | - FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) - COPY src $GOPATH/src - WORKDIR $GOPATH - - #timeout for starting up a container and waiting for Register to come through - startuptimeout: 20000 - - #mode - options are "dev", "net" - #dev - in dev mode, user runs the chaincode after starting validator from command line on local machine - #net - in net mode validator will run chaincode in a docker container - - mode: net - - installpath: /opt/gopath/bin/ - -############################################################################### -# -# Ledger section - ledger configuration encompases both the blockchain -# and the state -# -############################################################################### -ledger: - - blockchain: - - state: - - # Control the number state deltas that are maintained. This takes additional - # disk space, but allow the state to be rolled backwards and forwards - # without the need to replay transactions. - deltaHistorySize: 500 diff --git a/core/ledgernext/kvledger/example/app.go b/core/ledger/kvledger/example/app.go similarity index 98% rename from core/ledgernext/kvledger/example/app.go rename to core/ledger/kvledger/example/app.go index 0ea3a83c4cb..6e4ed349c6f 100644 --- a/core/ledgernext/kvledger/example/app.go +++ b/core/ledger/kvledger/example/app.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/kvledger/example/committer.go b/core/ledger/kvledger/example/committer.go similarity index 96% rename from core/ledgernext/kvledger/example/committer.go rename to core/ledger/kvledger/example/committer.go index 7cf4b1b7a05..d46431ba584 100644 --- a/core/ledgernext/kvledger/example/committer.go +++ b/core/ledger/kvledger/example/committer.go @@ -17,7 +17,7 @@ limitations under the License. 
package example import ( - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/kvledger/example/consenter.go b/core/ledger/kvledger/example/consenter.go similarity index 100% rename from core/ledgernext/kvledger/example/consenter.go rename to core/ledger/kvledger/example/consenter.go diff --git a/core/ledgernext/kvledger/example/main/example.go b/core/ledger/kvledger/example/main/example.go similarity index 94% rename from core/ledgernext/kvledger/example/main/example.go rename to core/ledger/kvledger/example/main/example.go index f3caa05d546..f86b9c4ab02 100644 --- a/core/ledgernext/kvledger/example/main/example.go +++ b/core/ledger/kvledger/example/main/example.go @@ -20,14 +20,14 @@ import ( "fmt" "os" - "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" - "github.com/hyperledger/fabric/core/ledgernext/kvledger/example" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/example" "github.com/hyperledger/fabric/protos" ) const ( - ledgerPath = "/tmp/test/ledgernext/kvledger/example" + ledgerPath = "/tmp/test/ledger/kvledger/example" ) var finalLedger ledger.ValidatedLedger diff --git a/core/ledgernext/kvledger/kv_ledger.go b/core/ledger/kvledger/kv_ledger.go similarity index 93% rename from core/ledgernext/kvledger/kv_ledger.go rename to core/ledger/kvledger/kv_ledger.go index 2e39a1d17cf..48901a91a33 100644 --- a/core/ledgernext/kvledger/kv_ledger.go +++ b/core/ledger/kvledger/kv_ledger.go @@ -21,11 +21,11 @@ import ( "fmt" "strings" - "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/blkstorage" - "github.com/hyperledger/fabric/core/ledgernext/blkstorage/fsblkstorage" - "github.com/hyperledger/fabric/core/ledgernext/kvledger/txmgmt" - "github.com/hyperledger/fabric/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/blkstorage" + "github.com/hyperledger/fabric/core/ledger/blkstorage/fsblkstorage" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/lockbasedtxmgmt" "github.com/hyperledger/fabric/protos" ) @@ -47,7 +47,7 @@ func NewConf(filesystemPath string, maxBlockfileSize int) *Conf { return &Conf{blocksStorageDir, maxBlockfileSize, txMgrDBPath} } -// KVLedger provides an implementation of `ledgernext.ValidatedLedger`. +// KVLedger provides an implementation of `ledger.ValidatedLedger`. 
// This implementation provides a key-value based data model type KVLedger struct { blockStore blkstorage.BlockStore diff --git a/core/ledgernext/kvledger/kv_ledger_test.go b/core/ledger/kvledger/kv_ledger_test.go similarity index 97% rename from core/ledgernext/kvledger/kv_ledger_test.go rename to core/ledger/kvledger/kv_ledger_test.go index e16f06f9bb2..a50acebff52 100644 --- a/core/ledgernext/kvledger/kv_ledger_test.go +++ b/core/ledger/kvledger/kv_ledger_test.go @@ -19,7 +19,7 @@ package kvledger import ( "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledgernext/kvledger/kv_ledgers.go b/core/ledger/kvledger/kv_ledgers.go similarity index 100% rename from core/ledgernext/kvledger/kv_ledgers.go rename to core/ledger/kvledger/kv_ledgers.go diff --git a/core/ledgernext/kvledger/kv_ledgers_test.go b/core/ledger/kvledger/kv_ledgers_test.go similarity index 100% rename from core/ledgernext/kvledger/kv_ledgers_test.go rename to core/ledger/kvledger/kv_ledgers_test.go diff --git a/core/ledgernext/kvledger/pkg_test.go b/core/ledger/kvledger/pkg_test.go similarity index 95% rename from core/ledgernext/kvledger/pkg_test.go rename to core/ledger/kvledger/pkg_test.go index 72aec7401a4..d3091c453a8 100644 --- a/core/ledgernext/kvledger/pkg_test.go +++ b/core/ledger/kvledger/pkg_test.go @@ -27,7 +27,7 @@ type testEnv struct { } func newTestEnv(t testing.TB) *testEnv { - conf := NewConf("/tmp/tests/ledgernext/", 0) + conf := NewConf("/tmp/tests/ledger/", 0) os.RemoveAll(conf.blockStorageDir) os.RemoveAll(conf.txMgrDBPath) return &testEnv{conf, t} diff --git a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go similarity index 97% rename from core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go rename to core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go index ece3469a5d3..ddc91b3a0b0 100644 --- a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go +++ b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_query_executer.go @@ -19,7 +19,7 @@ package lockbasedtxmgmt import ( "errors" - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" ) // RWLockQueryExecutor is a query executor used in `LockBasedTxMgr` diff --git a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go similarity index 98% rename from core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go rename to core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go index 0a230530a66..b77866a7b03 100644 --- a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go +++ b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_tx_simulator.go @@ -20,7 +20,7 @@ import ( "errors" "reflect" - "github.com/hyperledger/fabric/core/ledgernext/kvledger/txmgmt" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt" ) type kvReadCache struct { diff --git a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go similarity index 99% rename from core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go rename to 
core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go index 193133bbcbd..8f997b0c0ba 100644 --- a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go +++ b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgmt_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) func TestTxSimulatorWithNoExistingData(t *testing.T) { diff --git a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go similarity index 98% rename from core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go rename to core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go index cdc478dcfdc..ed8999dce32 100644 --- a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go +++ b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/lockbased_txmgr.go @@ -21,9 +21,9 @@ import ( "sync" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledgernext" - "github.com/hyperledger/fabric/core/ledgernext/kvledger/txmgmt" - "github.com/hyperledger/fabric/core/ledgernext/util/db" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt" + "github.com/hyperledger/fabric/core/ledger/util/db" "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" "github.com/tecbot/gorocksdb" diff --git a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go similarity index 91% rename from core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go rename to core/ledger/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go index 33cc42e0706..1e6350948bb 100644 --- a/core/ledgernext/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go +++ b/core/ledger/kvledger/txmgmt/lockbasedtxmgmt/pkg_test.go @@ -26,7 +26,7 @@ type testEnv struct { } func newTestEnv(t testing.TB) *testEnv { - conf := &Conf{"/tmp/tests/ledgernext/kvledger/txmgmt/lockbasedtxmgmt"} + conf := &Conf{"/tmp/tests/ledger/kvledger/txmgmt/lockbasedtxmgmt"} os.RemoveAll(conf.DBPath) return &testEnv{conf} } diff --git a/core/ledgernext/kvledger/txmgmt/rwset.go b/core/ledger/kvledger/txmgmt/rwset.go similarity index 100% rename from core/ledgernext/kvledger/txmgmt/rwset.go rename to core/ledger/kvledger/txmgmt/rwset.go diff --git a/core/ledgernext/kvledger/txmgmt/rwset_test.go b/core/ledger/kvledger/txmgmt/rwset_test.go similarity index 96% rename from core/ledgernext/kvledger/txmgmt/rwset_test.go rename to core/ledger/kvledger/txmgmt/rwset_test.go index 151145ba5b4..1d535573117 100644 --- a/core/ledgernext/kvledger/txmgmt/rwset_test.go +++ b/core/ledger/kvledger/txmgmt/rwset_test.go @@ -19,7 +19,7 @@ package txmgmt import ( "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) func TestTxRWSetMarshalUnmarshal(t *testing.T) { diff --git a/core/ledgernext/kvledger/txmgmt/txmgmt.go b/core/ledger/kvledger/txmgmt/txmgmt.go similarity index 95% rename from core/ledgernext/kvledger/txmgmt/txmgmt.go rename to core/ledger/kvledger/txmgmt/txmgmt.go index cabab8cacc2..684a34ff88f 100644 --- a/core/ledgernext/kvledger/txmgmt/txmgmt.go +++ b/core/ledger/kvledger/txmgmt/txmgmt.go @@ -17,7 +17,7 @@ limitations under the License. 
package txmgmt import ( - "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/protos" ) diff --git a/core/ledger/ledger.go b/core/ledger/ledger.go deleted file mode 100644 index 760d18b48bc..00000000000 --- a/core/ledger/ledger.go +++ /dev/null @@ -1,531 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ledger - -import ( - "bytes" - "fmt" - "reflect" - "sync" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/hyperledger/fabric/core/ledger/statemgmt/state" - "github.com/hyperledger/fabric/events/producer" - "github.com/op/go-logging" - "github.com/tecbot/gorocksdb" - - "github.com/hyperledger/fabric/protos" - "golang.org/x/net/context" -) - -var ledgerLogger = logging.MustGetLogger("ledger") - -//ErrorType represents the type of a ledger error -type ErrorType string - -const ( - //ErrorTypeInvalidArgument used to indicate the invalid input to ledger method - ErrorTypeInvalidArgument = ErrorType("InvalidArgument") - //ErrorTypeOutOfBounds used to indicate that a request is out of bounds - ErrorTypeOutOfBounds = ErrorType("OutOfBounds") - //ErrorTypeResourceNotFound used to indicate if a resource is not found - ErrorTypeResourceNotFound = ErrorType("ResourceNotFound") - //ErrorTypeBlockNotFound used to indicate if a block is not found when looked up by it's hash - ErrorTypeBlockNotFound = ErrorType("ErrorTypeBlockNotFound") -) - -//Error can be used for throwing an error from ledger code. 
-type Error struct { - errType ErrorType - msg string -} - -func (ledgerError *Error) Error() string { - return fmt.Sprintf("LedgerError - %s: %s", ledgerError.errType, ledgerError.msg) -} - -//Type returns the type of the error -func (ledgerError *Error) Type() ErrorType { - return ledgerError.errType -} - -func newLedgerError(errType ErrorType, msg string) *Error { - return &Error{errType, msg} -} - -var ( - // ErrOutOfBounds is returned if a request is out of bounds - ErrOutOfBounds = newLedgerError(ErrorTypeOutOfBounds, "ledger: out of bounds") - - // ErrResourceNotFound is returned if a resource is not found - ErrResourceNotFound = newLedgerError(ErrorTypeResourceNotFound, "ledger: resource not found") -) - -// Ledger - the struct for openchain ledger -type Ledger struct { - blockchain *blockchain - state *state.State - currentID interface{} -} - -var ledger *Ledger -var ledgerError error -var once sync.Once - -// GetLedger - gives a reference to a 'singleton' ledger -func GetLedger() (*Ledger, error) { - panic("----DONT ACCESS OLD LEDGER----") -} - -// GetNewLedger - gives a reference to a new ledger TODO need better approach -func GetNewLedger() (*Ledger, error) { - blockchain, err := newBlockchain() - if err != nil { - return nil, err - } - - state := state.NewState() - return &Ledger{blockchain, state, nil}, nil -} - -/////////////////// Transaction-batch related methods /////////////////////////////// -///////////////////////////////////////////////////////////////////////////////////// - -// BeginTxBatch - gets invoked when next round of transaction-batch execution begins -func (ledger *Ledger) BeginTxBatch(id interface{}) error { - err := ledger.checkValidIDBegin() - if err != nil { - return err - } - ledger.currentID = id - return nil -} - -// GetTXBatchPreviewBlockInfo returns a preview block info that will -// contain the same information as GetBlockchainInfo will return after -// ledger.CommitTxBatch is called with the same parameters. If the -// state is modified by a transaction between these two calls, the -// contained hash will be different. 
-func (ledger *Ledger) GetTXBatchPreviewBlockInfo(id interface{}, - transactions []*protos.Transaction, metadata []byte) (*protos.BlockchainInfo, error) { - err := ledger.checkValidIDCommitORRollback(id) - if err != nil { - return nil, err - } - stateHash, err := ledger.state.GetHash() - if err != nil { - return nil, err - } - block := ledger.blockchain.buildBlock(protos.NewBlock(transactions, metadata), stateHash) - info := ledger.blockchain.getBlockchainInfoForBlock(ledger.blockchain.getSize()+1, block) - return info, nil -} - -// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed -// This function returns successfully iff the transactions details and state changes (that -// may have happened during execution of this transaction-batch) have been committed to permanent storage -func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, transactionResults []*protos.TransactionResult, metadata []byte) error { - err := ledger.checkValidIDCommitORRollback(id) - if err != nil { - return err - } - - stateHash, err := ledger.state.GetHash() - if err != nil { - ledger.resetForNextTxGroup(false) - ledger.blockchain.blockPersistenceStatus(false) - return err - } - - writeBatch := gorocksdb.NewWriteBatch() - defer writeBatch.Destroy() - block := protos.NewBlock(transactions, metadata) - - ccEvents := []*protos.ChaincodeEvent{} - - if transactionResults != nil { - ccEvents = make([]*protos.ChaincodeEvent, len(transactionResults)) - for i := 0; i < len(transactionResults); i++ { - if transactionResults[i].ChaincodeEvent != nil { - ccEvents[i] = transactionResults[i].ChaincodeEvent - } else { - //We need the index so we can map the chaincode - //event to the transaction that generated it. - //Hence need an entry for cc event even if one - //wasn't generated for the transaction. We cannot - //use a nil cc event as protobuf does not like - //elements of a repeated array to be nil. - // - //We should discard empty events without chaincode - //ID when sending out events. - ccEvents[i] = &protos.ChaincodeEvent{} - } - } - } - - //store chaincode events directly in NonHashData. This will likely change in New Consensus where we can move them to Transaction - block.NonHashData = &protos.NonHashData{ChaincodeEvents: ccEvents} - newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch) - if err != nil { - ledger.resetForNextTxGroup(false) - ledger.blockchain.blockPersistenceStatus(false) - return err - } - ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch) - opt := gorocksdb.NewDefaultWriteOptions() - defer opt.Destroy() - dbErr := db.GetDBHandle().DB.Write(opt, writeBatch) - if dbErr != nil { - ledger.resetForNextTxGroup(false) - ledger.blockchain.blockPersistenceStatus(false) - return dbErr - } - - ledger.resetForNextTxGroup(true) - ledger.blockchain.blockPersistenceStatus(true) - - sendProducerBlockEvent(block) - - //send chaincode events from transaction results - sendChaincodeEvents(transactionResults) - - if len(transactionResults) != 0 { - ledgerLogger.Debug("There were some erroneous transactions. 
We need to send a 'TX rejected' message here.") - } - return nil -} - -// RollbackTxBatch - Discards all the state changes that may have taken place during the execution of -// current transaction-batch -func (ledger *Ledger) RollbackTxBatch(id interface{}) error { - ledgerLogger.Debugf("RollbackTxBatch for id = [%s]", id) - err := ledger.checkValidIDCommitORRollback(id) - if err != nil { - return err - } - ledger.resetForNextTxGroup(false) - return nil -} - -// TxBegin - Marks the beginning of a new transaction in the ongoing batch -func (ledger *Ledger) TxBegin(txID string) { - ledger.state.TxBegin(txID) -} - -// TxFinished - Marks the finish of the on-going transaction. -// If txSuccessful is false, the state changes made by the transaction are discarded -func (ledger *Ledger) TxFinished(txID string, txSuccessful bool) { - ledger.state.TxFinish(txID, txSuccessful) -} - -/////////////////// world-state related methods ///////////////////////////////////// -///////////////////////////////////////////////////////////////////////////////////// - -// GetTempStateHash - Computes state hash by taking into account the state changes that may have taken -// place during the execution of current transaction-batch -func (ledger *Ledger) GetTempStateHash() ([]byte, error) { - return ledger.state.GetHash() -} - -// GetTempStateHashWithTxDeltaStateHashes - In addition to the state hash (as defined in method GetTempStateHash), -// this method returns a map [txUuid of Tx --> cryptoHash(stateChangesMadeByTx)] -// Only successful txs appear in this map -func (ledger *Ledger) GetTempStateHashWithTxDeltaStateHashes() ([]byte, map[string][]byte, error) { - stateHash, err := ledger.state.GetHash() - return stateHash, ledger.state.GetTxStateDeltaHash(), err -} - -// GetState gets state for chaincodeID and key. If committed is false, this first looks in memory -// and if missing, pulls from db. If committed is true, this pulls from the db only. -func (ledger *Ledger) GetState(chaincodeID string, key string, committed bool) ([]byte, error) { - return ledger.state.Get(chaincodeID, key, committed) -} - -// GetStateRangeScanIterator returns an iterator to get all the keys (and values) between startKey and endKey -// (assuming lexical order of the keys) for a chaincodeID. -// If committed is true, the key-values are retrieved only from the db. If committed is false, the results from db -// are merged with the results in memory (giving preference to in-memory data) -// The key-values in the returned iterator are not guaranteed to be in any specific order -func (ledger *Ledger) GetStateRangeScanIterator(chaincodeID string, startKey string, endKey string, committed bool) (statemgmt.RangeScanIterator, error) { - return ledger.state.GetRangeScanIterator(chaincodeID, startKey, endKey, committed) -} - -// SetState sets state to given value for chaincodeID and key. Does not immediately write to DB -func (ledger *Ledger) SetState(chaincodeID string, key string, value []byte) error { - if key == "" || value == nil { - return newLedgerError(ErrorTypeInvalidArgument, - fmt.Sprintf("An empty string key or a nil value is not supported. Method invoked with key='%s', value='%#v'", key, value)) - } - return ledger.state.Set(chaincodeID, key, value) -} - -// DeleteState tracks the deletion of state for chaincodeID and key.
Does not immediately write to DB -func (ledger *Ledger) DeleteState(chaincodeID string, key string) error { - return ledger.state.Delete(chaincodeID, key) -} - -// CopyState copies all the key-values from sourceChaincodeID to destChaincodeID -func (ledger *Ledger) CopyState(sourceChaincodeID string, destChaincodeID string) error { - return ledger.state.CopyState(sourceChaincodeID, destChaincodeID) -} - -// GetStateMultipleKeys returns the values for the multiple keys. -// This method is mainly to amortize the cost of grpc communication between chaincode shim and peer -func (ledger *Ledger) GetStateMultipleKeys(chaincodeID string, keys []string, committed bool) ([][]byte, error) { - return ledger.state.GetMultipleKeys(chaincodeID, keys, committed) -} - -// SetStateMultipleKeys sets the values for the multiple keys. -// This method is mainly to amortize the cost of grpc communication between chaincode shim and peer -func (ledger *Ledger) SetStateMultipleKeys(chaincodeID string, kvs map[string][]byte) error { - return ledger.state.SetMultipleKeys(chaincodeID, kvs) -} - -// GetStateSnapshot returns a point-in-time view of the global state for the current block. This -// should be used when transferring the state from one peer to another peer. You must call -// stateSnapshot.Release() once you are done with the snapshot to free up resources. -func (ledger *Ledger) GetStateSnapshot() (*state.StateSnapshot, error) { - dbSnapshot := db.GetDBHandle().GetSnapshot() - blockHeight, err := fetchBlockchainSizeFromSnapshot(dbSnapshot) - if err != nil { - dbSnapshot.Release() - return nil, err - } - if 0 == blockHeight { - dbSnapshot.Release() - return nil, fmt.Errorf("Blockchain has no blocks, cannot determine block number") - } - return ledger.state.GetSnapshot(blockHeight-1, dbSnapshot) -} - -// GetStateDelta will return the state delta for the specified block if -// available. If not available because it has been discarded, returns nil,nil. -func (ledger *Ledger) GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error) { - if blockNumber >= ledger.GetBlockchainSize() { - return nil, ErrOutOfBounds - } - return ledger.state.FetchStateDeltaFromDB(blockNumber) -} - -// ApplyStateDelta applies a state delta to the current state. This is an -// in-memory change only. You must call ledger.CommitStateDelta to persist -// the change to the DB. -// This should only be used as part of state synchronization. State deltas -// can be retrieved from another peer through the Ledger.GetStateDelta function -// or by creating state deltas with keys retrieved from -// Ledger.GetStateSnapshot(). For an example, see TestSetRawState in -// ledger_test.go -// Note that there is no order checking in this function and it is up to -// the caller to ensure that deltas are applied in the correct order. -// For example, if you are currently at block 8 and call this function -// with a delta retrieved from Ledger.GetStateDelta(10), you would now -// be in a bad state because you did not apply the delta for block 9. -// It's possible to roll the state forwards or backwards using -// stateDelta.RollBackwards. By default, a delta retrieved for block 3 can -// be used to roll forwards from state at block 2 to state at block 3. If -// stateDelta.RollBackwards=false, the delta retrieved for block 3 can be -// used to roll backwards from the state at block 3 to the state at block 2.
-func (ledger *Ledger) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error { - err := ledger.checkValidIDBegin() - if err != nil { - return err - } - ledger.currentID = id - ledger.state.ApplyStateDelta(delta) - return nil -} - -// CommitStateDelta will commit the state delta passed to ledger.ApplyStateDelta -// to the DB -func (ledger *Ledger) CommitStateDelta(id interface{}) error { - err := ledger.checkValidIDCommitORRollback(id) - if err != nil { - return err - } - defer ledger.resetForNextTxGroup(true) - return ledger.state.CommitStateDelta() -} - -// RollbackStateDelta will discard the state delta passed -// to ledger.ApplyStateDelta -func (ledger *Ledger) RollbackStateDelta(id interface{}) error { - err := ledger.checkValidIDCommitORRollback(id) - if err != nil { - return err - } - ledger.resetForNextTxGroup(false) - return nil -} - -// DeleteALLStateKeysAndValues deletes all keys and values from the state. -// This is generally only used during state synchronization when creating a -// new state from a snapshot. -func (ledger *Ledger) DeleteALLStateKeysAndValues() error { - return ledger.state.DeleteState() -} - -/////////////////// blockchain related methods ///////////////////////////////////// -///////////////////////////////////////////////////////////////////////////////////// - -// GetBlockchainInfo returns information about the blockchain ledger such as -// height, current block hash, and previous block hash. -func (ledger *Ledger) GetBlockchainInfo() (*protos.BlockchainInfo, error) { - return ledger.blockchain.getBlockchainInfo() -} - -// GetBlockByNumber return block given the number of the block on blockchain. -// Lowest block on chain is block number zero -func (ledger *Ledger) GetBlockByNumber(blockNumber uint64) (*protos.Block, error) { - if blockNumber >= ledger.GetBlockchainSize() { - return nil, ErrOutOfBounds - } - return ledger.blockchain.getBlock(blockNumber) -} - -// GetBlockchainSize returns number of blocks in blockchain -func (ledger *Ledger) GetBlockchainSize() uint64 { - return ledger.blockchain.getSize() -} - -// GetTransactionByID return transaction by it's txId -func (ledger *Ledger) GetTransactionByID(txID string) (*protos.Transaction, error) { - return ledger.blockchain.getTransactionByID(txID) -} - -// PutRawBlock puts a raw block on the chain. This function should only be -// used for synchronization between peers. -func (ledger *Ledger) PutRawBlock(block *protos.Block, blockNumber uint64) error { - err := ledger.blockchain.persistRawBlock(block, blockNumber) - if err != nil { - return err - } - sendProducerBlockEvent(block) - return nil -} - -// VerifyChain will verify the integrity of the blockchain. This is accomplished -// by ensuring that the previous block hash stored in each block matches -// the actual hash of the previous block in the chain. The return value is the -// block number of lowest block in the range which can be verified as valid. -// The first block is assumed to be valid, and an error is only returned if the -// first block does not exist, or some other sort of irrecoverable ledger error -// such as the first block failing to hash is encountered. -// For example, if VerifyChain(0, 99) is called and previous hash values stored -// in blocks 8, 32, and 42 do not match the actual hashes of respective previous -// block 42 would be the return value from this function. -// highBlock is the high block in the chain to include in verification. If you -// wish to verify the entire chain, use ledger.GetBlockchainSize() - 1. 
-// lowBlock is the low block in the chain to include in verification. If -// you wish to verify the entire chain, use 0 for the genesis block. -func (ledger *Ledger) VerifyChain(highBlock, lowBlock uint64) (uint64, error) { - if highBlock >= ledger.GetBlockchainSize() { - return highBlock, ErrOutOfBounds - } - if highBlock < lowBlock { - return lowBlock, ErrOutOfBounds - } - - currentBlock, err := ledger.GetBlockByNumber(highBlock) - if err != nil { - return highBlock, fmt.Errorf("Error fetching block %d.", highBlock) - } - if currentBlock == nil { - return highBlock, fmt.Errorf("Block %d is nil.", highBlock) - } - - for i := highBlock; i > lowBlock; i-- { - previousBlock, err := ledger.GetBlockByNumber(i - 1) - if err != nil { - return i, nil - } - if previousBlock == nil { - return i, nil - } - previousBlockHash, err := previousBlock.GetHash() - if err != nil { - return i, nil - } - if bytes.Compare(previousBlockHash, currentBlock.PreviousBlockHash) != 0 { - return i, nil - } - currentBlock = previousBlock - } - - return lowBlock, nil -} - -func (ledger *Ledger) checkValidIDBegin() error { - if ledger.currentID != nil { - return fmt.Errorf("Another TxGroup [%s] already in-progress", ledger.currentID) - } - return nil -} - -func (ledger *Ledger) checkValidIDCommitORRollback(id interface{}) error { - if !reflect.DeepEqual(ledger.currentID, id) { - return fmt.Errorf("Another TxGroup [%s] already in-progress", ledger.currentID) - } - return nil -} - -func (ledger *Ledger) resetForNextTxGroup(txCommited bool) { - ledgerLogger.Debug("resetting ledger state for next transaction batch") - ledger.currentID = nil - ledger.state.ClearInMemoryChanges(txCommited) -} - -func sendProducerBlockEvent(block *protos.Block) { - - // Remove payload from deploy transactions. This is done to make block - // events more lightweight as the payload for these types of transactions - // can be very large. - blockTransactions := block.GetTransactions() - for _, transaction := range blockTransactions { - if transaction.Type == protos.Transaction_CHAINCODE_DEPLOY { - deploymentSpec := &protos.ChaincodeDeploymentSpec{} - err := proto.Unmarshal(transaction.Payload, deploymentSpec) - if err != nil { - ledgerLogger.Errorf("Error unmarshalling deployment transaction for block event: %s", err) - continue - } - deploymentSpec.CodePackage = nil - deploymentSpecBytes, err := proto.Marshal(deploymentSpec) - if err != nil { - ledgerLogger.Errorf("Error marshalling deployment transaction for block event: %s", err) - continue - } - transaction.Payload = deploymentSpecBytes - } - } - - producer.Send(producer.CreateBlockEvent(block)) -} - -//send chaincode events created by transactions -func sendChaincodeEvents(trs []*protos.TransactionResult) { - if trs != nil { - for _, tr := range trs { - //we store empty chaincode events in the protobuf repeated array to make protobuf happy. - //when we replay off a block ignore empty events - if tr.ChaincodeEvent != nil && tr.ChaincodeEvent.ChaincodeID != "" { - producer.Send(producer.CreateChaincodeEvent(tr.ChaincodeEvent)) - } - } - } -} diff --git a/core/ledgernext/ledger_interface.go b/core/ledger/ledger_interface.go similarity index 100% rename from core/ledgernext/ledger_interface.go rename to core/ledger/ledger_interface.go diff --git a/core/ledger/ledger_test_exports.go b/core/ledger/ledger_test_exports.go deleted file mode 100644 index f1d7d013b04..00000000000 --- a/core/ledger/ledger_test_exports.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ledger - -import ( - "testing" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/testutil" -) - -var testDBWrapper = db.NewTestDBWrapper() - -//InitTestLedger provides a ledger for testing. This method creates a fresh db and constructs a ledger instance on that. -func InitTestLedger(t *testing.T) *Ledger { - testDBWrapper.CleanDB(t) - _, err := GetLedger() - testutil.AssertNoError(t, err, "Error while constructing ledger") - newLedger, err := GetNewLedger() - testutil.AssertNoError(t, err, "Error while constructing ledger") - ledger = newLedger - return newLedger -} diff --git a/core/ledger/perfstat/stat.go b/core/ledger/perfstat/stat.go deleted file mode 100644 index 1b8382d3a5d..00000000000 --- a/core/ledger/perfstat/stat.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package perfstat - -import ( - "fmt" - "sync" - "time" -) - -type stat struct { - rwLock sync.RWMutex - statName string - desc string - - numInvocations int64 - total int64 - min int64 - max int64 -} - -func newStat(name string, desc string) *stat { - return &stat{statName: name, desc: desc, min: int64(^uint64(0) >> 1)} -} - -func (s *stat) String() string { - s.rwLock.RLock() - defer s.rwLock.RUnlock() - return fmt.Sprintf("%s: [total:%d, numInvocation:%d, average:%d, min=%d, max=%d]", - s.statName, s.total, s.numInvocations, (s.total / s.numInvocations), s.min, s.max) -} - -func (s *stat) reset() { - s.rwLock.Lock() - defer s.rwLock.Unlock() - s.numInvocations = 0 - s.min = int64(^uint64(0) >> 1) - s.max = 0 - s.total = 0 -} - -func (s *stat) updateTimeSpent(startTime time.Time) { - s.updateDataStat(time.Since(startTime).Nanoseconds()) -} - -func (s *stat) updateDataStat(value int64) { - s.rwLock.Lock() - defer s.rwLock.Unlock() - s.numInvocations++ - s.total += value - if value < s.min { - s.min = value - } else if value > s.max { - s.max = value - } -} diff --git a/core/ledger/perfstat/stat_holder.go b/core/ledger/perfstat/stat_holder.go deleted file mode 100644 index 9fd3ec7c680..00000000000 --- a/core/ledger/perfstat/stat_holder.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package perfstat - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "sync" - "time" - - "github.com/op/go-logging" -) - -const enableStats = false -const printPeriodically = true -const printInterval = 10000 //Millisecond -const commonPrefix = "github.com/hyperledger/fabric/core/ledger" -const commonPrefixLen = len(commonPrefix) - -var holder *statsHolder -var logger = logging.MustGetLogger("ledger.perfstat") - -type statsHolder struct { - rwLock sync.RWMutex - m map[string]*stat -} - -func init() { - if !enableStats { - return - } - holder = &statsHolder{m: make(map[string]*stat)} - if printPeriodically { - go printStatsPeriodically() - } -} - -// UpdateTimeStat updates the stats for time spent at a particular point in the code -func UpdateTimeStat(id string, startTime time.Time) { - updateStat(id, time.Since(startTime).Nanoseconds()) -} - -// UpdateDataStat updates the stats for data at a particular point in the code -func UpdateDataStat(id string, value int64) { - updateStat(id, value) -} - -// ResetStats resets all the stats data -func ResetStats() { - if !enableStats { - return - } - holder.rwLock.Lock() - defer holder.rwLock.Unlock() - for _, v := range holder.m { - v.reset() - } -} - -// PrintStats prints the stats in the log file. -func PrintStats() { - if !enableStats { - return - } - holder.rwLock.RLock() - defer holder.rwLock.RUnlock() - logger.Info("Stats.......Start") - var paths []string - for k := range holder.m { - paths = append(paths, k) - } - sort.Strings(paths) - for _, k := range paths { - v := holder.m[k] - logger.Info(v.String()) - } - logger.Info("Stats.......Finish") -} - -func updateStat(id string, value int64) { - if !enableStats { - return - } - path := getCallerInfo() - statName := fmt.Sprintf("%s:%s", path, id) - fmt.Println(statName) - stat := getOrCreateStat(statName, "", 0) - stat.updateDataStat(value) -} - -func getOrCreateStat(name string, file string, line int) *stat { - holder.rwLock.RLock() - stat, ok := holder.m[name] - if ok { - holder.rwLock.RUnlock() - return stat - } - - holder.rwLock.RUnlock() - holder.rwLock.Lock() - defer holder.rwLock.Unlock() - stat, ok = holder.m[name] - if !ok { - stat = newStat(name, fmt.Sprintf("%s:%d", file, line)) - holder.m[name] = stat - } - return stat -} - -func printStatsPeriodically() { - for { - PrintStats() - time.Sleep(time.Duration(int64(printInterval) * time.Millisecond.Nanoseconds())) - } -} - -func getCallerInfo() string { - pc := make([]uintptr, 10) - // Note: the default value 4 will ensure stat name exclude the path - // "/perfstat.UpdateTimeStat -> /perfstat.updateStat -> /perfstat.getCallerInfo" - // "/perfstat.UpdateDataStat -> /perfstat.updateStat -> /perfstat.getCallerInfo" - runtime.Callers(4, pc) - var path bytes.Buffer - j := 0 - for i := range pc { - f := runtime.FuncForPC(pc[i]) - funcName := f.Name() - if strings.HasPrefix(funcName, commonPrefix) { - j = i - } else { - break - } - } - - for i := j; i >= 0; i-- { - f := runtime.FuncForPC(pc[i]) - funcName := f.Name() - funcNameShort := funcName[commonPrefixLen:] - path.WriteString(funcNameShort) - if i > 0 { - path.WriteString(" 
-> ") - } - } - - return path.String() -} diff --git a/core/ledger/statemgmt/buckettree/bucket_cache.go b/core/ledger/statemgmt/buckettree/bucket_cache.go deleted file mode 100644 index 1501f8b1c3c..00000000000 --- a/core/ledger/statemgmt/buckettree/bucket_cache.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package buckettree - -import ( - "sync" - "time" - "unsafe" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/perfstat" - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -var defaultBucketCacheMaxSize = 100 // MBs - -// We can create a cache and keep all the bucket nodes pre-loaded. -// Since, the bucket nodes do not contain actual data and max possible -// buckets are pre-determined, the memory demand may not be very high or can easily -// be controlled - by keeping seletive buckets in the cache (most likely first few levels of the bucket tree - because, -// higher the level of the bucket, more are the chances that the bucket would be required for recomputation of hash) -type bucketCache struct { - isEnabled bool - c map[bucketKey]*bucketNode - lock sync.RWMutex - size uint64 - maxSize uint64 -} - -func newBucketCache(maxSizeMBs int) *bucketCache { - isEnabled := true - if maxSizeMBs <= 0 { - isEnabled = false - } else { - logger.Infof("Constructing bucket-cache with max bucket cache size = [%d] MBs", maxSizeMBs) - } - return &bucketCache{c: make(map[bucketKey]*bucketNode), maxSize: uint64(maxSizeMBs * 1024 * 1024), isEnabled: isEnabled} -} - -func (cache *bucketCache) loadAllBucketNodesFromDB() { - if !cache.isEnabled { - return - } - openchainDB := db.GetDBHandle() - itr := openchainDB.GetStateCFIterator() - defer itr.Close() - itr.Seek([]byte{byte(0)}) - count := 0 - cache.lock.Lock() - defer cache.lock.Unlock() - for ; itr.Valid(); itr.Next() { - key := itr.Key().Data() - if key[0] != byte(0) { - itr.Key().Free() - itr.Value().Free() - break - } - bKey := decodeBucketKey(statemgmt.Copy(itr.Key().Data())) - nodeBytes := statemgmt.Copy(itr.Value().Data()) - bucketNode := unmarshalBucketNode(&bKey, nodeBytes) - size := bKey.size() + bucketNode.size() - cache.size += size - if cache.size >= cache.maxSize { - cache.size -= size - break - } - cache.c[bKey] = bucketNode - itr.Key().Free() - itr.Value().Free() - count++ - } - logger.Infof("Loaded buckets data in cache. Total buckets in DB = [%d]. 
Total cache size:=%d", count, cache.size) -} - -func (cache *bucketCache) putWithoutLock(key bucketKey, node *bucketNode) { - if !cache.isEnabled { - return - } - node.markedForDeletion = false - node.childrenUpdated = nil - existingNode, ok := cache.c[key] - size := uint64(0) - if ok { - size = node.size() - existingNode.size() - cache.size += size - if cache.size > cache.maxSize { - delete(cache.c, key) - cache.size -= (key.size() + existingNode.size()) - } else { - cache.c[key] = node - } - } else { - size = node.size() - cache.size += size - if cache.size > cache.maxSize { - return - } - cache.c[key] = node - } -} - -func (cache *bucketCache) get(key bucketKey) (*bucketNode, error) { - defer perfstat.UpdateTimeStat("timeSpent", time.Now()) - if !cache.isEnabled { - return fetchBucketNodeFromDB(&key) - } - cache.lock.RLock() - defer cache.lock.RUnlock() - bucketNode := cache.c[key] - if bucketNode == nil { - return fetchBucketNodeFromDB(&key) - } - return bucketNode, nil -} - -func (cache *bucketCache) removeWithoutLock(key bucketKey) { - if !cache.isEnabled { - return - } - node, ok := cache.c[key] - if ok { - cache.size -= (key.size() + node.size()) - delete(cache.c, key) - } -} - -func (bk bucketKey) size() uint64 { - return uint64(unsafe.Sizeof(bk)) -} - -func (bNode *bucketNode) size() uint64 { - size := uint64(unsafe.Sizeof(*bNode)) - numChildHashes := len(bNode.childrenCryptoHash) - if numChildHashes > 0 { - size += uint64(numChildHashes * len(bNode.childrenCryptoHash[0])) - } - return size -} diff --git a/core/ledger/statemgmt/buckettree/bucket_hash.go b/core/ledger/statemgmt/buckettree/bucket_hash.go deleted file mode 100644 index 154d51d68e6..00000000000 --- a/core/ledger/statemgmt/buckettree/bucket_hash.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "github.com/golang/protobuf/proto" - openchainUtil "github.com/hyperledger/fabric/core/util" -) - -type bucketHashCalculator struct { - bucketKey *bucketKey - currentChaincodeID string - dataNodes []*dataNode - hashingData []byte -} - -func newBucketHashCalculator(bucketKey *bucketKey) *bucketHashCalculator { - return &bucketHashCalculator{bucketKey, "", nil, nil} -} - -// addNextNode - this method assumes that the datanodes are added in the increasing order of the keys -func (c *bucketHashCalculator) addNextNode(dataNode *dataNode) { - chaincodeID, _ := dataNode.getKeyElements() - if chaincodeID != c.currentChaincodeID { - c.appendCurrentChaincodeData() - c.currentChaincodeID = chaincodeID - c.dataNodes = nil - } - c.dataNodes = append(c.dataNodes, dataNode) -} - -func (c *bucketHashCalculator) computeCryptoHash() []byte { - if c.currentChaincodeID != "" { - c.appendCurrentChaincodeData() - c.currentChaincodeID = "" - c.dataNodes = nil - } - logger.Debugf("Hashable content for bucket [%s]: length=%d, contentInStringForm=[%s]", c.bucketKey, len(c.hashingData), string(c.hashingData)) - if c.hashingData == nil { - return nil - } - return openchainUtil.ComputeCryptoHash(c.hashingData) -} - -func (c *bucketHashCalculator) appendCurrentChaincodeData() { - if c.currentChaincodeID == "" { - return - } - c.appendSizeAndData([]byte(c.currentChaincodeID)) - c.appendSize(len(c.dataNodes)) - for _, dataNode := range c.dataNodes { - _, key := dataNode.getKeyElements() - value := dataNode.getValue() - c.appendSizeAndData([]byte(key)) - c.appendSizeAndData(value) - } -} - -func (c *bucketHashCalculator) appendSizeAndData(b []byte) { - c.appendSize(len(b)) - c.hashingData = append(c.hashingData, b...) -} - -func (c *bucketHashCalculator) appendSize(size int) { - c.hashingData = append(c.hashingData, proto.EncodeVarint(uint64(size))...) -} diff --git a/core/ledger/statemgmt/buckettree/bucket_key.go b/core/ledger/statemgmt/buckettree/bucket_key.go deleted file mode 100644 index f87a7b5c659..00000000000 --- a/core/ledger/statemgmt/buckettree/bucket_key.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package buckettree - -import ( - "fmt" - - "github.com/golang/protobuf/proto" -) - -type bucketKey struct { - level int - bucketNumber int -} - -func newBucketKey(level int, bucketNumber int) *bucketKey { - if level > conf.getLowestLevel() || level < 0 { - panic(fmt.Errorf("Invalid Level [%d] for bucket key. Level can be between 0 and [%d]", level, conf.lowestLevel)) - } - - if bucketNumber < 1 || bucketNumber > conf.getNumBuckets(level) { - panic(fmt.Errorf("Invalid bucket number [%d]. 
Bucket number at level [%d] can be between 1 and [%d]", bucketNumber, level, conf.getNumBuckets(level))) - } - return &bucketKey{level, bucketNumber} -} - -func newBucketKeyAtLowestLevel(bucketNumber int) *bucketKey { - return newBucketKey(conf.getLowestLevel(), bucketNumber) -} - -func constructRootBucketKey() *bucketKey { - return newBucketKey(0, 1) -} - -func decodeBucketKey(keyBytes []byte) bucketKey { - level, numBytesRead := proto.DecodeVarint(keyBytes[1:]) - bucketNumber, _ := proto.DecodeVarint(keyBytes[numBytesRead+1:]) - return bucketKey{int(level), int(bucketNumber)} -} - -func (bucketKey *bucketKey) getParentKey() *bucketKey { - return newBucketKey(bucketKey.level-1, conf.computeParentBucketNumber(bucketKey.bucketNumber)) -} - -func (bucketKey *bucketKey) equals(anotherBucketKey *bucketKey) bool { - return bucketKey.level == anotherBucketKey.level && bucketKey.bucketNumber == anotherBucketKey.bucketNumber -} - -func (bucketKey *bucketKey) getChildIndex(childKey *bucketKey) int { - bucketNumberOfFirstChild := ((bucketKey.bucketNumber - 1) * conf.getMaxGroupingAtEachLevel()) + 1 - bucketNumberOfLastChild := bucketKey.bucketNumber * conf.getMaxGroupingAtEachLevel() - if childKey.bucketNumber < bucketNumberOfFirstChild || childKey.bucketNumber > bucketNumberOfLastChild { - panic(fmt.Errorf("[%#v] is not a valid child bucket of [%#v]", childKey, bucketKey)) - } - return childKey.bucketNumber - bucketNumberOfFirstChild -} - -func (bucketKey *bucketKey) getChildKey(index int) *bucketKey { - bucketNumberOfFirstChild := ((bucketKey.bucketNumber - 1) * conf.getMaxGroupingAtEachLevel()) + 1 - bucketNumberOfChild := bucketNumberOfFirstChild + index - return newBucketKey(bucketKey.level+1, bucketNumberOfChild) -} - -func (bucketKey *bucketKey) getEncodedBytes() []byte { - encodedBytes := []byte{} - encodedBytes = append(encodedBytes, byte(0)) - encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.level))...) - encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.bucketNumber))...) - return encodedBytes -} - -func (bucketKey *bucketKey) String() string { - return fmt.Sprintf("level=[%d], bucketNumber=[%d]", bucketKey.level, bucketKey.bucketNumber) -} - -func (bucketKey *bucketKey) clone() *bucketKey { - return newBucketKey(bucketKey.level, bucketKey.bucketNumber) -} diff --git a/core/ledger/statemgmt/buckettree/bucket_node.go b/core/ledger/statemgmt/buckettree/bucket_node.go deleted file mode 100644 index 7b1f5cea1db..00000000000 --- a/core/ledger/statemgmt/buckettree/bucket_node.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - openchainUtil "github.com/hyperledger/fabric/core/util" -) - -type bucketNode struct { - bucketKey *bucketKey - childrenCryptoHash [][]byte - childrenUpdated []bool - markedForDeletion bool -} - -func newBucketNode(bucketKey *bucketKey) *bucketNode { - maxChildren := conf.getMaxGroupingAtEachLevel() - return &bucketNode{bucketKey, make([][]byte, maxChildren), make([]bool, maxChildren), false} -} - -func unmarshalBucketNode(bucketKey *bucketKey, serializedBytes []byte) *bucketNode { - bucketNode := newBucketNode(bucketKey) - buffer := proto.NewBuffer(serializedBytes) - for i := 0; i < conf.getMaxGroupingAtEachLevel(); i++ { - childCryptoHash, err := buffer.DecodeRawBytes(false) - if err != nil { - panic(fmt.Errorf("this error should not occur: %s", err)) - } - //protobuf's buffer.EncodeRawBytes/buffer.DecodeRawBytes convert a nil into a zero length byte-array, so nil check would not work - if len(childCryptoHash) != 0 { - bucketNode.childrenCryptoHash[i] = childCryptoHash - } - } - return bucketNode -} - -func (bucketNode *bucketNode) marshal() []byte { - buffer := proto.NewBuffer([]byte{}) - for i := 0; i < conf.getMaxGroupingAtEachLevel(); i++ { - buffer.EncodeRawBytes(bucketNode.childrenCryptoHash[i]) - } - return buffer.Bytes() -} - -func (bucketNode *bucketNode) setChildCryptoHash(childKey *bucketKey, cryptoHash []byte) { - i := bucketNode.bucketKey.getChildIndex(childKey) - bucketNode.childrenCryptoHash[i] = cryptoHash - bucketNode.childrenUpdated[i] = true -} - -func (bucketNode *bucketNode) mergeBucketNode(anotherBucketNode *bucketNode) { - if !bucketNode.bucketKey.equals(anotherBucketNode.bucketKey) { - panic(fmt.Errorf("Nodes with different keys can not be merged. BaseKey=[%#v], MergeKey=[%#v]", bucketNode.bucketKey, anotherBucketNode.bucketKey)) - } - for i, childCryptoHash := range anotherBucketNode.childrenCryptoHash { - if !bucketNode.childrenUpdated[i] { - bucketNode.childrenCryptoHash[i] = childCryptoHash - } - } -} - -func (bucketNode *bucketNode) computeCryptoHash() []byte { - cryptoHashContent := []byte{} - numChildren := 0 - for i, childCryptoHash := range bucketNode.childrenCryptoHash { - if childCryptoHash != nil { - numChildren++ - logger.Debugf("Appending crypto-hash for child bucket = [%s]", bucketNode.bucketKey.getChildKey(i)) - cryptoHashContent = append(cryptoHashContent, childCryptoHash...) 
- } - } - if numChildren == 0 { - logger.Debugf("Returning crypto-hash of bucket = [%s] - because it has no children", bucketNode.bucketKey) - bucketNode.markedForDeletion = true - return nil - } - if numChildren == 1 { - logger.Debugf("Propagating crypto-hash of single child node for bucket = [%s]", bucketNode.bucketKey) - return cryptoHashContent - } - logger.Debugf("Computing crypto-hash for bucket [%s] by merging [%d] children", bucketNode.bucketKey, numChildren) - return openchainUtil.ComputeCryptoHash(cryptoHashContent) -} - -func (bucketNode *bucketNode) String() string { - numChildren := 0 - for i := range bucketNode.childrenCryptoHash { - if bucketNode.childrenCryptoHash[i] != nil { - numChildren++ - } - } - str := fmt.Sprintf("bucketKey={%s}\n NumChildren={%d}\n", bucketNode.bucketKey, numChildren) - if numChildren == 0 { - return str - } - - str = str + "Children crypto-hashes:\n" - for i := range bucketNode.childrenCryptoHash { - childCryptoHash := bucketNode.childrenCryptoHash[i] - if childCryptoHash != nil { - str = str + fmt.Sprintf("childNumber={%d}, cryptoHash={%x}\n", i, childCryptoHash) - } - } - return str -} diff --git a/core/ledger/statemgmt/buckettree/bucket_tree_delta.go b/core/ledger/statemgmt/buckettree/bucket_tree_delta.go deleted file mode 100644 index f81350de106..00000000000 --- a/core/ledger/statemgmt/buckettree/bucket_tree_delta.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -type byBucketNumber map[int]*bucketNode - -type bucketTreeDelta struct { - byLevel map[int]byBucketNumber -} - -func newBucketTreeDelta() *bucketTreeDelta { - return &bucketTreeDelta{make(map[int]byBucketNumber)} -} - -func (bucketTreeDelta *bucketTreeDelta) getOrCreateBucketNode(bucketKey *bucketKey) *bucketNode { - byBucketNumber := bucketTreeDelta.byLevel[bucketKey.level] - if byBucketNumber == nil { - byBucketNumber = make(map[int]*bucketNode) - bucketTreeDelta.byLevel[bucketKey.level] = byBucketNumber - } - bucketNode := byBucketNumber[bucketKey.bucketNumber] - if bucketNode == nil { - bucketNode = newBucketNode(bucketKey) - byBucketNumber[bucketKey.bucketNumber] = bucketNode - } - return bucketNode -} - -func (bucketTreeDelta *bucketTreeDelta) isEmpty() bool { - return bucketTreeDelta.byLevel == nil || len(bucketTreeDelta.byLevel) == 0 -} - -func (bucketTreeDelta *bucketTreeDelta) getBucketNodesAt(level int) []*bucketNode { - bucketNodes := []*bucketNode{} - byBucketNumber := bucketTreeDelta.byLevel[level] - if byBucketNumber == nil { - return nil - } - for _, bucketNode := range byBucketNumber { - bucketNodes = append(bucketNodes, bucketNode) - } - return bucketNodes -} - -func (bucketTreeDelta *bucketTreeDelta) getRootNode() *bucketNode { - bucketNodes := bucketTreeDelta.getBucketNodesAt(0) - if bucketNodes == nil || len(bucketNodes) == 0 { - panic("This method should be called after processing is completed (i.e., the root node has been created)") - } - return bucketNodes[0] -} diff --git a/core/ledger/statemgmt/buckettree/config.go b/core/ledger/statemgmt/buckettree/config.go deleted file mode 100644 index 21f94eaebd6..00000000000 --- a/core/ledger/statemgmt/buckettree/config.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package buckettree - -import ( - "fmt" - "hash/fnv" -) - -// ConfigNumBuckets - config name 'numBuckets' as it appears in yaml file -const ConfigNumBuckets = "numBuckets" - -// ConfigMaxGroupingAtEachLevel - config name 'maxGroupingAtEachLevel' as it appears in yaml file -const ConfigMaxGroupingAtEachLevel = "maxGroupingAtEachLevel" - -// ConfigHashFunction - config name 'hashFunction'. This is not exposed in yaml file. This configuration is used for testing with custom hash-function -const ConfigHashFunction = "hashFunction" - -// DefaultNumBuckets - total buckets -const DefaultNumBuckets = 10009 - -// DefaultMaxGroupingAtEachLevel - Number of max buckets to group at each level. -// Grouping is started from left. 
The last group may have fewer buckets -const DefaultMaxGroupingAtEachLevel = 10 - -var conf *config - -type config struct { - maxGroupingAtEachLevel int - lowestLevel int - levelToNumBucketsMap map[int]int - hashFunc hashFunc -} - -func initConfig(configs map[string]interface{}) { - logger.Infof("configs passed during initialization = %#v", configs) - - numBuckets, ok := configs[ConfigNumBuckets].(int) - if !ok { - numBuckets = DefaultNumBuckets - } - - maxGroupingAtEachLevel, ok := configs[ConfigMaxGroupingAtEachLevel].(int) - if !ok { - maxGroupingAtEachLevel = DefaultMaxGroupingAtEachLevel - } - - hashFunction, ok := configs[ConfigHashFunction].(hashFunc) - if !ok { - hashFunction = fnvHash - } - conf = newConfig(numBuckets, maxGroupingAtEachLevel, hashFunction) - logger.Infof("Initializing bucket tree state implementation with configurations %+v", conf) -} - -func newConfig(numBuckets int, maxGroupingAtEachLevel int, hashFunc hashFunc) *config { - conf := &config{maxGroupingAtEachLevel, -1, make(map[int]int), hashFunc} - currentLevel := 0 - numBucketAtCurrentLevel := numBuckets - levelInfoMap := make(map[int]int) - levelInfoMap[currentLevel] = numBucketAtCurrentLevel - for numBucketAtCurrentLevel > 1 { - numBucketAtParentLevel := numBucketAtCurrentLevel / maxGroupingAtEachLevel - if numBucketAtCurrentLevel%maxGroupingAtEachLevel != 0 { - numBucketAtParentLevel++ - } - - numBucketAtCurrentLevel = numBucketAtParentLevel - currentLevel++ - levelInfoMap[currentLevel] = numBucketAtCurrentLevel - } - - conf.lowestLevel = currentLevel - for k, v := range levelInfoMap { - conf.levelToNumBucketsMap[conf.lowestLevel-k] = v - } - return conf -} - -func (config *config) getNumBuckets(level int) int { - if level < 0 || level > config.lowestLevel { - panic(fmt.Errorf("level can only be between 0 and [%d]", config.lowestLevel)) - } - return config.levelToNumBucketsMap[level] -} - -func (config *config) computeBucketHash(data []byte) uint32 { - return config.hashFunc(data) -} - -func (config *config) getLowestLevel() int { - return config.lowestLevel -} - -func (config *config) getMaxGroupingAtEachLevel() int { - return config.maxGroupingAtEachLevel -} - -func (config *config) getNumBucketsAtLowestLevel() int { - return config.getNumBuckets(config.getLowestLevel()) -} - -func (config *config) computeParentBucketNumber(bucketNumber int) int { - logger.Debugf("Computing parent bucket number for bucketNumber [%d]", bucketNumber) - parentBucketNumber := bucketNumber / config.getMaxGroupingAtEachLevel() - if bucketNumber%config.getMaxGroupingAtEachLevel() != 0 { - parentBucketNumber++ - } - return parentBucketNumber -} - -type hashFunc func(data []byte) uint32 - -func fnvHash(data []byte) uint32 { - fnvHash := fnv.New32a() - fnvHash.Write(data) - return fnvHash.Sum32() -} diff --git a/core/ledger/statemgmt/buckettree/data_key.go b/core/ledger/statemgmt/buckettree/data_key.go deleted file mode 100644 index 1bd3539525c..00000000000 --- a/core/ledger/statemgmt/buckettree/data_key.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package buckettree - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/hyperledger/fabric/core/ledger/util" -) - -type dataKey struct { - bucketKey *bucketKey - compositeKey []byte -} - -func newDataKey(chaincodeID string, key string) *dataKey { - logger.Debugf("Enter - newDataKey. chaincodeID=[%s], key=[%s]", chaincodeID, key) - compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key) - bucketHash := conf.computeBucketHash(compositeKey) - // Adding one because - we start bucket-numbers 1 onwards - bucketNumber := int(bucketHash)%conf.getNumBucketsAtLowestLevel() + 1 - dataKey := &dataKey{newBucketKeyAtLowestLevel(bucketNumber), compositeKey} - logger.Debugf("Exit - newDataKey=[%s]", dataKey) - return dataKey -} - -func minimumPossibleDataKeyBytesFor(bucketKey *bucketKey) []byte { - min := encodeBucketNumber(bucketKey.bucketNumber) - min = append(min, byte(0)) - return min -} - -func minimumPossibleDataKeyBytes(bucketNumber int, chaincodeID string, key string) []byte { - b := encodeBucketNumber(bucketNumber) - b = append(b, statemgmt.ConstructCompositeKey(chaincodeID, key)...) - return b -} - -func (key *dataKey) getBucketKey() *bucketKey { - return key.bucketKey -} - -func encodeBucketNumber(bucketNumber int) []byte { - return util.EncodeOrderPreservingVarUint64(uint64(bucketNumber)) -} - -func decodeBucketNumber(encodedBytes []byte) (int, int) { - bucketNum, bytesConsumed := util.DecodeOrderPreservingVarUint64(encodedBytes) - return int(bucketNum), bytesConsumed -} - -func (key *dataKey) getEncodedBytes() []byte { - encodedBytes := encodeBucketNumber(key.bucketKey.bucketNumber) - encodedBytes = append(encodedBytes, key.compositeKey...) - return encodedBytes -} - -func newDataKeyFromEncodedBytes(encodedBytes []byte) *dataKey { - bucketNum, l := decodeBucketNumber(encodedBytes) - compositeKey := encodedBytes[l:] - return &dataKey{newBucketKeyAtLowestLevel(bucketNum), compositeKey} -} - -func (key *dataKey) String() string { - return fmt.Sprintf("bucketKey=[%s], compositeKey=[%s]", key.bucketKey, string(key.compositeKey)) -} - -func (key *dataKey) clone() *dataKey { - clone := &dataKey{key.bucketKey.clone(), key.compositeKey} - return clone -} diff --git a/core/ledger/statemgmt/buckettree/data_node.go b/core/ledger/statemgmt/buckettree/data_node.go deleted file mode 100644 index a35bb5da770..00000000000 --- a/core/ledger/statemgmt/buckettree/data_node.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -type dataNode struct { - dataKey *dataKey - value []byte -} - -func newDataNode(dataKey *dataKey, value []byte) *dataNode { - return &dataNode{dataKey, value} -} - -func unmarshalDataNodeFromBytes(keyBytes []byte, valueBytes []byte) *dataNode { - return unmarshalDataNode(newDataKeyFromEncodedBytes(keyBytes), valueBytes) -} - -func unmarshalDataNode(dataKey *dataKey, serializedBytes []byte) *dataNode { - return &dataNode{dataKey, serializedBytes} -} - -func (dataNode *dataNode) getCompositeKey() []byte { - return dataNode.dataKey.compositeKey -} - -func (dataNode *dataNode) isDelete() bool { - return dataNode.value == nil -} - -func (dataNode *dataNode) getKeyElements() (string, string) { - return statemgmt.DecodeCompositeKey(dataNode.getCompositeKey()) -} - -func (dataNode *dataNode) getValue() []byte { - return dataNode.value -} - -func (dataNode *dataNode) String() string { - return fmt.Sprintf("dataKey=[%s], value=[%s]", dataNode.dataKey, string(dataNode.value)) -} diff --git a/core/ledger/statemgmt/buckettree/data_nodes_delta.go b/core/ledger/statemgmt/buckettree/data_nodes_delta.go deleted file mode 100644 index e8f54918ca6..00000000000 --- a/core/ledger/statemgmt/buckettree/data_nodes_delta.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "bytes" - "sort" - - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -// Code for managing changes in data nodes -type dataNodes []*dataNode - -func (dataNodes dataNodes) Len() int { - return len(dataNodes) -} - -func (dataNodes dataNodes) Swap(i, j int) { - dataNodes[i], dataNodes[j] = dataNodes[j], dataNodes[i] -} - -func (dataNodes dataNodes) Less(i, j int) bool { - return bytes.Compare(dataNodes[i].dataKey.compositeKey, dataNodes[j].dataKey.compositeKey) < 0 -} - -type dataNodesDelta struct { - byBucket map[bucketKey]dataNodes -} - -func newDataNodesDelta(stateDelta *statemgmt.StateDelta) *dataNodesDelta { - dataNodesDelta := &dataNodesDelta{make(map[bucketKey]dataNodes)} - chaincodeIDs := stateDelta.GetUpdatedChaincodeIds(false) - for _, chaincodeID := range chaincodeIDs { - updates := stateDelta.GetUpdates(chaincodeID) - for key, updatedValue := range updates { - if stateDelta.RollBackwards { - dataNodesDelta.add(chaincodeID, key, updatedValue.GetPreviousValue()) - } else { - dataNodesDelta.add(chaincodeID, key, updatedValue.GetValue()) - } - } - } - for _, dataNodes := range dataNodesDelta.byBucket { - sort.Sort(dataNodes) - } - return dataNodesDelta -} - -func (dataNodesDelta *dataNodesDelta) add(chaincodeID string, key string, value []byte) { - dataKey := newDataKey(chaincodeID, key) - bucketKey := dataKey.getBucketKey() - dataNode := newDataNode(dataKey, value) - logger.Debugf("Adding dataNode=[%s] against bucketKey=[%s]", dataNode, bucketKey) - dataNodesDelta.byBucket[*bucketKey] = append(dataNodesDelta.byBucket[*bucketKey], dataNode) -} - -func (dataNodesDelta *dataNodesDelta) getAffectedBuckets() []*bucketKey { - changedBuckets := []*bucketKey{} - for bucketKey := range dataNodesDelta.byBucket { - copyOfBucketKey := bucketKey.clone() - logger.Debugf("Adding changed bucket [%s]", copyOfBucketKey) - changedBuckets = append(changedBuckets, copyOfBucketKey) - } - logger.Debugf("Changed buckets are = [%s]", changedBuckets) - return changedBuckets -} - -func (dataNodesDelta *dataNodesDelta) getSortedDataNodesFor(bucketKey *bucketKey) dataNodes { - return dataNodesDelta.byBucket[*bucketKey] -} diff --git a/core/ledger/statemgmt/buckettree/db_helper.go b/core/ledger/statemgmt/buckettree/db_helper.go deleted file mode 100644 index bc2d3a4893b..00000000000 --- a/core/ledger/statemgmt/buckettree/db_helper.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -func fetchDataNodeFromDB(dataKey *dataKey) (*dataNode, error) { - openchainDB := db.GetDBHandle() - nodeBytes, err := openchainDB.GetFromStateCF(dataKey.getEncodedBytes()) - if err != nil { - return nil, err - } - if nodeBytes == nil { - logger.Debug("nodeBytes from db is nil") - } else if len(nodeBytes) == 0 { - logger.Debug("nodeBytes from db is an empty array") - } - // key does not exist - if nodeBytes == nil { - return nil, nil - } - return unmarshalDataNode(dataKey, nodeBytes), nil -} - -func fetchBucketNodeFromDB(bucketKey *bucketKey) (*bucketNode, error) { - openchainDB := db.GetDBHandle() - nodeBytes, err := openchainDB.GetFromStateCF(bucketKey.getEncodedBytes()) - if err != nil { - return nil, err - } - if nodeBytes == nil { - return nil, nil - } - return unmarshalBucketNode(bucketKey, nodeBytes), nil -} - -type rawKey []byte - -func fetchDataNodesFromDBFor(bucketKey *bucketKey) (dataNodes, error) { - logger.Debugf("Fetching from DB data nodes for bucket [%s]", bucketKey) - openchainDB := db.GetDBHandle() - itr := openchainDB.GetStateCFIterator() - defer itr.Close() - minimumDataKeyBytes := minimumPossibleDataKeyBytesFor(bucketKey) - - var dataNodes dataNodes - - itr.Seek(minimumDataKeyBytes) - - for ; itr.Valid(); itr.Next() { - - // making a copy of key-value bytes because, underlying key bytes are reused by itr. - // no need to free slices as iterator frees memory when closed. - keyBytes := statemgmt.Copy(itr.Key().Data()) - valueBytes := statemgmt.Copy(itr.Value().Data()) - - dataKey := newDataKeyFromEncodedBytes(keyBytes) - logger.Debugf("Retrieved data key [%s] from DB for bucket [%s]", dataKey, bucketKey) - if !dataKey.getBucketKey().equals(bucketKey) { - logger.Debugf("Data key [%s] from DB does not belong to bucket = [%s]. Stopping further iteration and returning results [%v]", dataKey, bucketKey, dataNodes) - return dataNodes, nil - } - dataNode := unmarshalDataNode(dataKey, valueBytes) - - logger.Debugf("Data node [%s] from DB belongs to bucket = [%s]. Including the key in results...", dataNode, bucketKey) - dataNodes = append(dataNodes, dataNode) - } - logger.Debugf("Returning results [%v]", dataNodes) - return dataNodes, nil -} diff --git a/core/ledger/statemgmt/buckettree/range_scan_iterator.go b/core/ledger/statemgmt/buckettree/range_scan_iterator.go deleted file mode 100644 index dea4be9a12a..00000000000 --- a/core/ledger/statemgmt/buckettree/range_scan_iterator.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// RangeScanIterator implements the interface 'statemgmt.RangeScanIterator' -type RangeScanIterator struct { - dbItr *gorocksdb.Iterator - chaincodeID string - startKey string - endKey string - currentBucketNumber int - currentKey string - currentValue []byte - done bool -} - -func newRangeScanIterator(chaincodeID string, startKey string, endKey string) (*RangeScanIterator, error) { - dbItr := db.GetDBHandle().GetStateCFIterator() - itr := &RangeScanIterator{ - dbItr: dbItr, - chaincodeID: chaincodeID, - startKey: startKey, - endKey: endKey, - } - itr.seekForStartKeyWithinBucket(1) - return itr, nil -} - -// Next - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) Next() bool { - if itr.done { - return false - } - - for itr.dbItr.Valid() { - - // making a copy of key-value bytes because, underlying key bytes are reused by itr. - // no need to free slices as iterator frees memory when closed. - keyBytes := statemgmt.Copy(itr.dbItr.Key().Data()) - valueBytes := statemgmt.Copy(itr.dbItr.Value().Data()) - - dataNode := unmarshalDataNodeFromBytes(keyBytes, valueBytes) - dataKey := dataNode.dataKey - chaincodeID, key := statemgmt.DecodeCompositeKey(dataNode.getCompositeKey()) - value := dataNode.value - logger.Debugf("Evaluating data-key = %s", dataKey) - - bucketNumber := dataKey.bucketKey.bucketNumber - if bucketNumber > itr.currentBucketNumber { - itr.seekForStartKeyWithinBucket(bucketNumber) - continue - } - - if chaincodeID == itr.chaincodeID && (itr.endKey == "" || key <= itr.endKey) { - logger.Debugf("including data-key = %s", dataKey) - itr.currentKey = key - itr.currentValue = value - itr.dbItr.Next() - return true - } - - itr.seekForStartKeyWithinBucket(bucketNumber + 1) - continue - } - itr.done = true - return false -} - -func (itr *RangeScanIterator) seekForStartKeyWithinBucket(bucketNumber int) { - itr.currentBucketNumber = bucketNumber - datakeyBytes := minimumPossibleDataKeyBytes(bucketNumber, itr.chaincodeID, itr.startKey) - itr.dbItr.Seek(datakeyBytes) -} - -// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) GetKeyValue() (string, []byte) { - return itr.currentKey, itr.currentValue -} - -// Close - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) Close() { - itr.dbItr.Close() -} diff --git a/core/ledger/statemgmt/buckettree/snapshot_iterator.go b/core/ledger/statemgmt/buckettree/snapshot_iterator.go deleted file mode 100644 index 8004ab41e26..00000000000 --- a/core/ledger/statemgmt/buckettree/snapshot_iterator.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// StateSnapshotIterator implements the interface 'statemgmt.StateSnapshotIterator' -type StateSnapshotIterator struct { - dbItr *gorocksdb.Iterator -} - -func newStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (*StateSnapshotIterator, error) { - dbItr := db.GetDBHandle().GetStateCFSnapshotIterator(snapshot) - dbItr.Seek([]byte{0x01}) - dbItr.Prev() - return &StateSnapshotIterator{dbItr}, nil -} - -// Next - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) Next() bool { - snapshotItr.dbItr.Next() - return snapshotItr.dbItr.Valid() -} - -// GetRawKeyValue - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) GetRawKeyValue() ([]byte, []byte) { - - // making a copy of key-value bytes because, underlying key bytes are reused by itr. - // no need to free slices as iterator frees memory when closed. - keyBytes := statemgmt.Copy(snapshotItr.dbItr.Key().Data()) - valueBytes := statemgmt.Copy(snapshotItr.dbItr.Value().Data()) - dataNode := unmarshalDataNodeFromBytes(keyBytes, valueBytes) - return dataNode.getCompositeKey(), dataNode.getValue() -} - -// Close - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) Close() { - snapshotItr.dbItr.Close() -} diff --git a/core/ledger/statemgmt/buckettree/state_impl.go b/core/ledger/statemgmt/buckettree/state_impl.go deleted file mode 100644 index 9aa0773cd8f..00000000000 --- a/core/ledger/statemgmt/buckettree/state_impl.go +++ /dev/null @@ -1,302 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buckettree - -import ( - "bytes" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/op/go-logging" - "github.com/tecbot/gorocksdb" -) - -var logger = logging.MustGetLogger("buckettree") - -// StateImpl - implements the interface - 'statemgmt.HashableState' -type StateImpl struct { - dataNodesDelta *dataNodesDelta - bucketTreeDelta *bucketTreeDelta - persistedStateHash []byte - lastComputedCryptoHash []byte - recomputeCryptoHash bool - bucketCache *bucketCache -} - -// NewStateImpl constructs a new StateImpl -func NewStateImpl() *StateImpl { - return &StateImpl{} -} - -// Initialize - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) Initialize(configs map[string]interface{}) error { - initConfig(configs) - rootBucketNode, err := fetchBucketNodeFromDB(constructRootBucketKey()) - if err != nil { - return err - } - if rootBucketNode != nil { - stateImpl.persistedStateHash = rootBucketNode.computeCryptoHash() - stateImpl.lastComputedCryptoHash = stateImpl.persistedStateHash - } - - bucketCacheMaxSize, ok := configs["bucketCacheSize"].(int) - if !ok { - bucketCacheMaxSize = defaultBucketCacheMaxSize - } - stateImpl.bucketCache = newBucketCache(bucketCacheMaxSize) - stateImpl.bucketCache.loadAllBucketNodesFromDB() - return nil -} - -// Get - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) Get(chaincodeID string, key string) ([]byte, error) { - dataKey := newDataKey(chaincodeID, key) - dataNode, err := fetchDataNodeFromDB(dataKey) - if err != nil { - return nil, err - } - if dataNode == nil { - return nil, nil - } - return dataNode.value, nil -} - -// PrepareWorkingSet - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error { - logger.Debug("Enter - PrepareWorkingSet()") - if stateDelta.IsEmpty() { - logger.Debug("Ignoring working-set as it is empty") - return nil - } - stateImpl.dataNodesDelta = newDataNodesDelta(stateDelta) - stateImpl.bucketTreeDelta = newBucketTreeDelta() - stateImpl.recomputeCryptoHash = true - return nil -} - -// ClearWorkingSet - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) ClearWorkingSet(changesPersisted bool) { - logger.Debug("Enter - ClearWorkingSet()") - if changesPersisted { - stateImpl.persistedStateHash = stateImpl.lastComputedCryptoHash - stateImpl.updateBucketCache() - } else { - stateImpl.lastComputedCryptoHash = stateImpl.persistedStateHash - } - stateImpl.dataNodesDelta = nil - stateImpl.bucketTreeDelta = nil - stateImpl.recomputeCryptoHash = false -} - -// ComputeCryptoHash - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) ComputeCryptoHash() ([]byte, error) { - logger.Debug("Enter - ComputeCryptoHash()") - if stateImpl.recomputeCryptoHash { - logger.Debug("Recomputing crypto-hash...") - err := stateImpl.processDataNodeDelta() - if err != nil { - return nil, err - } - err = stateImpl.processBucketTreeDelta() - if err != nil { - return nil, err - } - stateImpl.lastComputedCryptoHash = stateImpl.computeRootNodeCryptoHash() - stateImpl.recomputeCryptoHash = false - } else { - logger.Debug("Returning existing crypto-hash as recomputation not required") - } - return stateImpl.lastComputedCryptoHash, nil -} - -func (stateImpl *StateImpl) processDataNodeDelta() error { - afftectedBuckets := 
stateImpl.dataNodesDelta.getAffectedBuckets() - for _, bucketKey := range afftectedBuckets { - updatedDataNodes := stateImpl.dataNodesDelta.getSortedDataNodesFor(bucketKey) - existingDataNodes, err := fetchDataNodesFromDBFor(bucketKey) - if err != nil { - return err - } - cryptoHashForBucket := computeDataNodesCryptoHash(bucketKey, updatedDataNodes, existingDataNodes) - logger.Debugf("Crypto-hash for lowest-level bucket [%s] is [%x]", bucketKey, cryptoHashForBucket) - parentBucket := stateImpl.bucketTreeDelta.getOrCreateBucketNode(bucketKey.getParentKey()) - parentBucket.setChildCryptoHash(bucketKey, cryptoHashForBucket) - } - return nil -} - -func (stateImpl *StateImpl) processBucketTreeDelta() error { - secondLastLevel := conf.getLowestLevel() - 1 - for level := secondLastLevel; level >= 0; level-- { - bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level) - logger.Debugf("Bucket tree delta. Number of buckets at level [%d] are [%d]", level, len(bucketNodes)) - for _, bucketNode := range bucketNodes { - logger.Debugf("bucketNode in tree-delta [%s]", bucketNode) - dbBucketNode, err := stateImpl.bucketCache.get(*bucketNode.bucketKey) - logger.Debugf("bucket node from db [%s]", dbBucketNode) - if err != nil { - return err - } - if dbBucketNode != nil { - bucketNode.mergeBucketNode(dbBucketNode) - logger.Debugf("After merge... bucketNode in tree-delta [%s]", bucketNode) - } - if level == 0 { - return nil - } - logger.Debugf("Computing cryptoHash for bucket [%s]", bucketNode) - cryptoHash := bucketNode.computeCryptoHash() - logger.Debugf("cryptoHash for bucket [%s] is [%x]", bucketNode, cryptoHash) - parentBucket := stateImpl.bucketTreeDelta.getOrCreateBucketNode(bucketNode.bucketKey.getParentKey()) - parentBucket.setChildCryptoHash(bucketNode.bucketKey, cryptoHash) - } - } - return nil -} - -func (stateImpl *StateImpl) computeRootNodeCryptoHash() []byte { - return stateImpl.bucketTreeDelta.getRootNode().computeCryptoHash() -} - -func computeDataNodesCryptoHash(bucketKey *bucketKey, updatedNodes dataNodes, existingNodes dataNodes) []byte { - logger.Debugf("Computing crypto-hash for bucket [%s]. 
numUpdatedNodes=[%d], numExistingNodes=[%d]", bucketKey, len(updatedNodes), len(existingNodes)) - bucketHashCalculator := newBucketHashCalculator(bucketKey) - i := 0 - j := 0 - for i < len(updatedNodes) && j < len(existingNodes) { - updatedNode := updatedNodes[i] - existingNode := existingNodes[j] - c := bytes.Compare(updatedNode.dataKey.compositeKey, existingNode.dataKey.compositeKey) - var nextNode *dataNode - switch c { - case -1: - nextNode = updatedNode - i++ - case 0: - nextNode = updatedNode - i++ - j++ - case 1: - nextNode = existingNode - j++ - } - if !nextNode.isDelete() { - bucketHashCalculator.addNextNode(nextNode) - } - } - - var remainingNodes dataNodes - if i < len(updatedNodes) { - remainingNodes = updatedNodes[i:] - } else if j < len(existingNodes) { - remainingNodes = existingNodes[j:] - } - - for _, remainingNode := range remainingNodes { - if !remainingNode.isDelete() { - bucketHashCalculator.addNextNode(remainingNode) - } - } - return bucketHashCalculator.computeCryptoHash() -} - -// AddChangesForPersistence - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error { - - if stateImpl.dataNodesDelta == nil { - return nil - } - - if stateImpl.recomputeCryptoHash { - _, err := stateImpl.ComputeCryptoHash() - if err != nil { - return nil - } - } - stateImpl.addDataNodeChangesForPersistence(writeBatch) - stateImpl.addBucketNodeChangesForPersistence(writeBatch) - return nil -} - -func (stateImpl *StateImpl) addDataNodeChangesForPersistence(writeBatch *gorocksdb.WriteBatch) { - openchainDB := db.GetDBHandle() - affectedBuckets := stateImpl.dataNodesDelta.getAffectedBuckets() - for _, affectedBucket := range affectedBuckets { - dataNodes := stateImpl.dataNodesDelta.getSortedDataNodesFor(affectedBucket) - for _, dataNode := range dataNodes { - if dataNode.isDelete() { - logger.Debugf("Deleting data node key = %#v", dataNode.dataKey) - writeBatch.DeleteCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes()) - } else { - logger.Debugf("Adding data node with value = %#v", dataNode.value) - writeBatch.PutCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes(), dataNode.value) - } - } - } -} - -func (stateImpl *StateImpl) addBucketNodeChangesForPersistence(writeBatch *gorocksdb.WriteBatch) { - openchainDB := db.GetDBHandle() - secondLastLevel := conf.getLowestLevel() - 1 - for level := secondLastLevel; level >= 0; level-- { - bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level) - for _, bucketNode := range bucketNodes { - if bucketNode.markedForDeletion { - writeBatch.DeleteCF(openchainDB.StateCF, bucketNode.bucketKey.getEncodedBytes()) - } else { - writeBatch.PutCF(openchainDB.StateCF, bucketNode.bucketKey.getEncodedBytes(), bucketNode.marshal()) - } - } - } -} - -func (stateImpl *StateImpl) updateBucketCache() { - if stateImpl.bucketTreeDelta == nil || stateImpl.bucketTreeDelta.isEmpty() { - return - } - stateImpl.bucketCache.lock.Lock() - defer stateImpl.bucketCache.lock.Unlock() - secondLastLevel := conf.getLowestLevel() - 1 - for level := 0; level <= secondLastLevel; level++ { - bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level) - for _, bucketNode := range bucketNodes { - key := *bucketNode.bucketKey - if bucketNode.markedForDeletion { - stateImpl.bucketCache.removeWithoutLock(key) - } else { - stateImpl.bucketCache.putWithoutLock(key, bucketNode) - } - } - } -} - -// PerfHintKeyChanged - method implementation for interface 
'statemgmt.HashableState' -func (stateImpl *StateImpl) PerfHintKeyChanged(chaincodeID string, key string) { - // We can create a cache. Pull all the keys for the bucket (to which given key belongs) in a separate thread - // This prefetching can help making method 'ComputeCryptoHash' faster. -} - -// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) { - return newStateSnapshotIterator(snapshot) -} - -// GetRangeScanIterator - method implementation for interface 'statemgmt.HashableState' -func (stateImpl *StateImpl) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) { - return newRangeScanIterator(chaincodeID, startKey, endKey) -} diff --git a/core/ledger/statemgmt/buckettree/test.yaml b/core/ledger/statemgmt/buckettree/test.yaml deleted file mode 100644 index aa3f08aedde..00000000000 --- a/core/ledger/statemgmt/buckettree/test.yaml +++ /dev/null @@ -1,15 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/ledger/statemgmt/buckettree/testdb -ledger: - state: - dataStructure: - name: buckettree - configs: - numBuckets: 19 - maxGroupingAtEachLevel: 3 diff --git a/core/ledger/statemgmt/commons.go b/core/ledger/statemgmt/commons.go deleted file mode 100644 index 102cd18656a..00000000000 --- a/core/ledger/statemgmt/commons.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package statemgmt - -import ( - "bytes" - - "github.com/op/go-logging" -) - -var logger = logging.MustGetLogger("statemgmt") - -var stateKeyDelimiter = []byte{0x00} - -// ConstructCompositeKey returns a []byte that uniquely represents a given chaincodeID and key. 
-// This assumes that chaincodeID does not contain a 0x00 byte, but the key may -// TODO:enforce this restriction on chaincodeID or use length prefixing here instead of delimiter -func ConstructCompositeKey(chaincodeID string, key string) []byte { - return bytes.Join([][]byte{[]byte(chaincodeID), []byte(key)}, stateKeyDelimiter) -} - -// DecodeCompositeKey decodes the compositeKey constructed by ConstructCompositeKey method -// back to the original chaincodeID and key form -func DecodeCompositeKey(compositeKey []byte) (string, string) { - split := bytes.SplitN(compositeKey, stateKeyDelimiter, 2) - return string(split[0]), string(split[1]) -} - -// Copy returns a copy of given bytes -func Copy(src []byte) []byte { - dest := make([]byte, len(src)) - copy(dest, src) - return dest -} diff --git a/core/ledger/statemgmt/hashable_state.go b/core/ledger/statemgmt/hashable_state.go deleted file mode 100644 index 04bc6aa55d0..00000000000 --- a/core/ledger/statemgmt/hashable_state.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package statemgmt - -import ( - "github.com/tecbot/gorocksdb" -) - -// HashableState - Interface that is to be implemented by state management -// Different state management implementation can be efficient for computing crypto-hash for -// state under different workload conditions. -type HashableState interface { - - // Initialize this gives a chance to initialize. For instance, state implementation can load some data from DB - Initialize(configs map[string]interface{}) error - - // Get get the value from DB - Get(chaincodeID string, key string) ([]byte, error) - - // PrepareWorkingSet passes a stateDelta that captures the changes that needs to be applied to the state - PrepareWorkingSet(stateDelta *StateDelta) error - - // ComputeCryptoHash state implementation to compute crypto-hash of state - // assuming the stateDelta (passed in PrepareWorkingSet method) is to be applied - ComputeCryptoHash() ([]byte, error) - - // AddChangesForPersistence state implementation to add all the key-value pair that it needs - // to persist for committing the stateDelta (passed in PrepareWorkingSet method) to DB. - // In addition to the information in the StateDelta, the implementation may also want to - // persist intermediate results for faster crypto-hash computation - AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error - - // ClearWorkingSet state implementation may clear any data structures that it may have constructed - // for computing cryptoHash and persisting the changes for the stateDelta (passed in PrepareWorkingSet method) - ClearWorkingSet(changesPersisted bool) - - // GetStateSnapshotIterator state implementation to provide an iterator that is supposed to give - // All the key-value of global state. A particular implementation may need to remove additional information - // that the implementation keeps for faster crypto-hash computation. 
For instance, filter a few of the - // key-values or remove some data from particular key-values. - GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (StateSnapshotIterator, error) - - // GetRangeScanIterator - state implementation to provide an iterator that is supposed to give - // All the key-values for a given chaincodeID such that a return key should be lexically greater than or - // equal to startKey and less than or equal to endKey. If the value for startKey parameter is an empty string - // startKey is assumed to be the smallest key available in the db for the chaincodeID. Similarly, an empty string - // for endKey parameter assumes the endKey to be the greatest key available in the db for the chaincodeID - GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (RangeScanIterator, error) - - // PerfHintKeyChanged state implementation may be provided with some hints before (e.g., during tx execution) - // the StateDelta is prepared and passed in PrepareWorkingSet method. - // A state implementation may use this hint for prefetching relevant data so as if this could improve - // the performance of ComputeCryptoHash method (when gets called at a later time) - PerfHintKeyChanged(chaincodeID string, key string) -} - -// StateSnapshotIterator An interface that is to be implemented by the return value of -// GetStateSnapshotIterator method in the implementation of HashableState interface -type StateSnapshotIterator interface { - - // Next moves to next key-value. Returns true if next key-value exists - Next() bool - - // GetRawKeyValue returns next key-value - GetRawKeyValue() ([]byte, []byte) - - // Close releases resources occupied by the iterator - Close() -} - -// RangeScanIterator - is to be implemented by the return value of -// GetRangeScanIterator method in the implementation of HashableState interface -type RangeScanIterator interface { - - // Next moves to next key-value. Returns true if next key-value exists - Next() bool - - // GetKeyValue returns next key-value - GetKeyValue() (string, []byte) - - // Close releases resources occupied by the iterator - Close() -} diff --git a/core/ledger/statemgmt/raw/state_impl.go b/core/ledger/statemgmt/raw/state_impl.go deleted file mode 100644 index 5229c226ee4..00000000000 --- a/core/ledger/statemgmt/raw/state_impl.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package raw - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// StateImpl implements raw state management. This implementation does not support computation of crypto-hash of the state. 
-// It simply stores the compositeKey and value in the db -type StateImpl struct { - stateDelta *statemgmt.StateDelta -} - -// NewStateImpl constructs new instance of raw state -func NewStateImpl() *StateImpl { - return &StateImpl{} -} - -// Initialize - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) Initialize(configs map[string]interface{}) error { - return nil -} - -// Get - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) Get(chaincodeID string, key string) ([]byte, error) { - compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key) - openchainDB := db.GetDBHandle() - return openchainDB.GetFromStateCF(compositeKey) -} - -// PrepareWorkingSet - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error { - impl.stateDelta = stateDelta - return nil -} - -// ClearWorkingSet - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) ClearWorkingSet(changesPersisted bool) { - impl.stateDelta = nil -} - -// ComputeCryptoHash - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) ComputeCryptoHash() ([]byte, error) { - return nil, nil -} - -// AddChangesForPersistence - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error { - delta := impl.stateDelta - if delta == nil { - return nil - } - openchainDB := db.GetDBHandle() - updatedChaincodeIds := delta.GetUpdatedChaincodeIds(false) - for _, updatedChaincodeID := range updatedChaincodeIds { - updates := delta.GetUpdates(updatedChaincodeID) - for updatedKey, value := range updates { - compositeKey := statemgmt.ConstructCompositeKey(updatedChaincodeID, updatedKey) - if value.IsDeleted() { - writeBatch.DeleteCF(openchainDB.StateCF, compositeKey) - } else { - writeBatch.PutCF(openchainDB.StateCF, compositeKey, value.GetValue()) - } - } - } - return nil -} - -// PerfHintKeyChanged - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) PerfHintKeyChanged(chaincodeID string, key string) { -} - -// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) { - panic("Not a full-fledged state implementation. Implemented only for measuring best-case performance benchmark") -} - -// GetRangeScanIterator - method implementation for interface 'statemgmt.HashableState' -func (impl *StateImpl) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) { - panic("Not a full-fledged state implementation. Implemented only for measuring best-case performance benchmark") -} diff --git a/core/ledger/statemgmt/state/composite_range_scan_iterator.go b/core/ledger/statemgmt/state/composite_range_scan_iterator.go deleted file mode 100644 index 548aa6f71ba..00000000000 --- a/core/ledger/statemgmt/state/composite_range_scan_iterator.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
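The raw StateImpl above keys every entry by the composite key built from chaincodeID and key. Assuming stateKeyDelimiter is the single 0x00 byte that the removed comment implies, a standalone sketch of the encode/decode pair looks like this (function names are lower-cased copies for illustration):

package main

import (
	"bytes"
	"fmt"
)

// stateKeyDelimiter is assumed here to be a single 0x00 byte; the removed comment
// notes that chaincodeID must not contain this byte, while the key may.
var stateKeyDelimiter = []byte{0x00}

// constructCompositeKey mirrors the removed statemgmt.ConstructCompositeKey.
func constructCompositeKey(chaincodeID string, key string) []byte {
	return bytes.Join([][]byte{[]byte(chaincodeID), []byte(key)}, stateKeyDelimiter)
}

// decodeCompositeKey mirrors the removed statemgmt.DecodeCompositeKey; SplitN with
// a limit of 2 keeps any 0x00 bytes inside the key itself intact.
func decodeCompositeKey(compositeKey []byte) (string, string) {
	split := bytes.SplitN(compositeKey, stateKeyDelimiter, 2)
	return string(split[0]), string(split[1])
}

func main() {
	ck := constructCompositeKey("mycc", "account\x001") // the key itself may contain 0x00
	ccID, key := decodeCompositeKey(ck)
	fmt.Printf("chaincodeID=%q key=%q\n", ccID, key)
}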
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -// CompositeRangeScanIterator - an implementation of interface 'statemgmt.RangeScanIterator' -// This provides a wrapper on top of more than one underlying iterators -type CompositeRangeScanIterator struct { - itrs []statemgmt.RangeScanIterator - currentItrNumber int -} - -func newCompositeRangeScanIterator( - txDeltaItr *statemgmt.StateDeltaIterator, - batchDeltaItr *statemgmt.StateDeltaIterator, - implItr statemgmt.RangeScanIterator) statemgmt.RangeScanIterator { - itrs := make([]statemgmt.RangeScanIterator, 3) - itrs[0] = txDeltaItr - itrs[1] = batchDeltaItr - itrs[2] = implItr - return &CompositeRangeScanIterator{itrs, 0} -} - -// Next - see interface 'statemgmt.RangeScanIterator' for details -// The specific implementation below starts from first underlying iterator and -// after exhausting the first underlying iterator, move to the second underlying iterator. -// The implementation repeats this until last underlying iterator has been exhausted -// In addition, the key-value from an underlying iterator are skipped if the key is found -// in any of the preceding iterators -func (itr *CompositeRangeScanIterator) Next() bool { - currentItrNumber := itr.currentItrNumber - currentItr := itr.itrs[currentItrNumber] - logger.Debugf("Operating on iterator number = %d", currentItrNumber) - keyAvailable := currentItr.Next() - for keyAvailable { - key, _ := currentItr.GetKeyValue() - logger.Debugf("Retrieved key = %s", key) - skipKey := false - for i := currentItrNumber - 1; i >= 0; i-- { - logger.Debugf("Evaluating key = %s in itr number = %d. currentItrNumber = %d", key, i, currentItrNumber) - previousItr := itr.itrs[i] - if previousItr.(*statemgmt.StateDeltaIterator).ContainsKey(key) { - skipKey = true - break - } - } - if skipKey { - logger.Debugf("Skipping key = %s", key) - keyAvailable = currentItr.Next() - continue - } - break - } - - if keyAvailable || currentItrNumber == 2 { - logger.Debug("Returning for current key") - return keyAvailable - } - - logger.Debug("Moving to next iterator") - itr.currentItrNumber++ - return itr.Next() -} - -// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details -func (itr *CompositeRangeScanIterator) GetKeyValue() (string, []byte) { - return itr.itrs[itr.currentItrNumber].GetKeyValue() -} - -// Close - see interface 'statemgmt.RangeScanIterator' for details -func (itr *CompositeRangeScanIterator) Close() { - itr.itrs[2].Close() -} diff --git a/core/ledger/statemgmt/state/config.go b/core/ledger/statemgmt/state/config.go deleted file mode 100644 index 11fdd06f032..00000000000 --- a/core/ledger/statemgmt/state/config.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "fmt" - "sync" - - "github.com/spf13/viper" -) - -var loadConfigOnce sync.Once - -var stateImplName stateImplType -var stateImplConfigs map[string]interface{} -var deltaHistorySize int - -func initConfig() { - loadConfigOnce.Do(func() { loadConfig() }) -} - -func loadConfig() { - logger.Info("Loading configurations...") - stateImplName = stateImplType(viper.GetString("ledger.state.dataStructure.name")) - stateImplConfigs = viper.GetStringMap("ledger.state.dataStructure.configs") - deltaHistorySize = viper.GetInt("ledger.state.deltaHistorySize") - logger.Infof("Configurations loaded. stateImplName=[%s], stateImplConfigs=%s, deltaHistorySize=[%d]", - stateImplName, stateImplConfigs, deltaHistorySize) - - if len(stateImplName) == 0 { - stateImplName = defaultStateImpl - stateImplConfigs = nil - } else if stateImplName != buckettreeType && stateImplName != trieType && stateImplName != rawType { - panic(fmt.Errorf("Error during initialization of state implementation. State data structure '%s' is not valid.", stateImplName)) - } - - if deltaHistorySize < 0 { - panic(fmt.Errorf("Delta history size must be greater than or equal to 0. Current value is %d.", deltaHistorySize)) - } -} diff --git a/core/ledger/statemgmt/state/state.go b/core/ledger/statemgmt/state/state.go deleted file mode 100644 index 4ec46b593fd..00000000000 --- a/core/ledger/statemgmt/state/state.go +++ /dev/null @@ -1,366 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "encoding/binary" - "fmt" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/hyperledger/fabric/core/ledger/statemgmt/buckettree" - "github.com/hyperledger/fabric/core/ledger/statemgmt/raw" - "github.com/hyperledger/fabric/core/ledger/statemgmt/trie" - "github.com/op/go-logging" - "github.com/tecbot/gorocksdb" -) - -var logger = logging.MustGetLogger("state") - -const defaultStateImpl = "buckettree" - -var stateImpl statemgmt.HashableState - -type stateImplType string - -const ( - buckettreeType stateImplType = "buckettree" - trieType stateImplType = "trie" - rawType stateImplType = "raw" -) - -// State structure for maintaining world state. 
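The deleted config.go chose among buckettree, trie and raw via viper. A hedged sketch of the same lookup and validation against in-memory settings follows; the keys match the removed loader, while the concrete values are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Simulate the settings that core.yaml / test.yaml supplied to the removed loader.
	viper.Set("ledger.state.dataStructure.name", "buckettree")
	viper.Set("ledger.state.deltaHistorySize", 500)

	name := viper.GetString("ledger.state.dataStructure.name")
	deltaHistorySize := viper.GetInt("ledger.state.deltaHistorySize")

	// Same defaulting and validation rules as the removed loadConfig().
	if name == "" {
		name = "buckettree"
	} else if name != "buckettree" && name != "trie" && name != "raw" {
		panic(fmt.Errorf("state data structure '%s' is not valid", name))
	}
	if deltaHistorySize < 0 {
		panic(fmt.Errorf("delta history size must be >= 0, got %d", deltaHistorySize))
	}
	fmt.Printf("stateImpl=%s deltaHistorySize=%d\n", name, deltaHistorySize)
}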
-// This encapsulates a particular implementation for managing the state persistence -// This is not thread safe -type State struct { - stateImpl statemgmt.HashableState - stateDelta *statemgmt.StateDelta - currentTxStateDelta *statemgmt.StateDelta - currentTxID string - txStateDeltaHash map[string][]byte - updateStateImpl bool - historyStateDeltaSize uint64 -} - -// NewState constructs a new State. This Initializes encapsulated state implementation -func NewState() *State { - initConfig() - logger.Infof("Initializing state implementation [%s]", stateImplName) - switch stateImplName { - case buckettreeType: - stateImpl = buckettree.NewStateImpl() - case trieType: - stateImpl = trie.NewStateImpl() - case rawType: - stateImpl = raw.NewStateImpl() - default: - panic("Should not reach here. Configs should have checked for the stateImplName being a valid names ") - } - err := stateImpl.Initialize(stateImplConfigs) - if err != nil { - panic(fmt.Errorf("Error during initialization of state implementation: %s", err)) - } - return &State{stateImpl, statemgmt.NewStateDelta(), statemgmt.NewStateDelta(), "", make(map[string][]byte), - false, uint64(deltaHistorySize)} -} - -// TxBegin marks begin of a new tx. If a tx is already in progress, this call panics -func (state *State) TxBegin(txID string) { - logger.Debugf("txBegin() for txId [%s]", txID) - if state.txInProgress() { - panic(fmt.Errorf("A tx [%s] is already in progress. Received call for begin of another tx [%s]", state.currentTxID, txID)) - } - state.currentTxID = txID -} - -// TxFinish marks the completion of on-going tx. If txID is not same as of the on-going tx, this call panics -func (state *State) TxFinish(txID string, txSuccessful bool) { - logger.Debugf("txFinish() for txId [%s], txSuccessful=[%t]", txID, txSuccessful) - if state.currentTxID != txID { - panic(fmt.Errorf("Different txId in tx-begin [%s] and tx-finish [%s]", state.currentTxID, txID)) - } - if txSuccessful { - if !state.currentTxStateDelta.IsEmpty() { - logger.Debugf("txFinish() for txId [%s] merging state changes", txID) - state.stateDelta.ApplyChanges(state.currentTxStateDelta) - state.txStateDeltaHash[txID] = state.currentTxStateDelta.ComputeCryptoHash() - state.updateStateImpl = true - } else { - state.txStateDeltaHash[txID] = nil - } - } - state.currentTxStateDelta = statemgmt.NewStateDelta() - state.currentTxID = "" -} - -func (state *State) txInProgress() bool { - return state.currentTxID != "" -} - -// Get returns state for chaincodeID and key. If committed is false, this first looks in memory and if missing, -// pulls from db. If committed is true, this pulls from the db only. -func (state *State) Get(chaincodeID string, key string, committed bool) ([]byte, error) { - if !committed { - valueHolder := state.currentTxStateDelta.Get(chaincodeID, key) - if valueHolder != nil { - return valueHolder.GetValue(), nil - } - valueHolder = state.stateDelta.Get(chaincodeID, key) - if valueHolder != nil { - return valueHolder.GetValue(), nil - } - } - return state.stateImpl.Get(chaincodeID, key) -} - -// GetRangeScanIterator returns an iterator to get all the keys (and values) between startKey and endKey -// (assuming lexical order of the keys) for a chaincodeID. 
-func (state *State) GetRangeScanIterator(chaincodeID string, startKey string, endKey string, committed bool) (statemgmt.RangeScanIterator, error) { - stateImplItr, err := state.stateImpl.GetRangeScanIterator(chaincodeID, startKey, endKey) - if err != nil { - return nil, err - } - - if committed { - return stateImplItr, nil - } - return newCompositeRangeScanIterator( - statemgmt.NewStateDeltaRangeScanIterator(state.currentTxStateDelta, chaincodeID, startKey, endKey), - statemgmt.NewStateDeltaRangeScanIterator(state.stateDelta, chaincodeID, startKey, endKey), - stateImplItr), nil -} - -// Set sets state to given value for chaincodeID and key. Does not immediately writes to DB -func (state *State) Set(chaincodeID string, key string, value []byte) error { - logger.Debugf("set() chaincodeID=[%s], key=[%s], value=[%#v]", chaincodeID, key, value) - if !state.txInProgress() { - panic("State can be changed only in context of a tx.") - } - - // Check if a previous value is already set in the state delta - if state.currentTxStateDelta.IsUpdatedValueSet(chaincodeID, key) { - // No need to bother looking up the previous value as we will not - // set it again. Just pass nil - state.currentTxStateDelta.Set(chaincodeID, key, value, nil) - } else { - // Need to lookup the previous value - previousValue, err := state.Get(chaincodeID, key, true) - if err != nil { - return err - } - state.currentTxStateDelta.Set(chaincodeID, key, value, previousValue) - } - - return nil -} - -// Delete tracks the deletion of state for chaincodeID and key. Does not immediately writes to DB -func (state *State) Delete(chaincodeID string, key string) error { - logger.Debugf("delete() chaincodeID=[%s], key=[%s]", chaincodeID, key) - if !state.txInProgress() { - panic("State can be changed only in context of a tx.") - } - - // Check if a previous value is already set in the state delta - if state.currentTxStateDelta.IsUpdatedValueSet(chaincodeID, key) { - // No need to bother looking up the previous value as we will not - // set it again. Just pass nil - state.currentTxStateDelta.Delete(chaincodeID, key, nil) - } else { - // Need to lookup the previous value - previousValue, err := state.Get(chaincodeID, key, true) - if err != nil { - return err - } - state.currentTxStateDelta.Delete(chaincodeID, key, previousValue) - } - - return nil -} - -// CopyState copies all the key-values from sourceChaincodeID to destChaincodeID -func (state *State) CopyState(sourceChaincodeID string, destChaincodeID string) error { - itr, err := state.GetRangeScanIterator(sourceChaincodeID, "", "", true) - defer itr.Close() - if err != nil { - return err - } - for itr.Next() { - k, v := itr.GetKeyValue() - err := state.Set(destChaincodeID, k, v) - if err != nil { - return err - } - } - return nil -} - -// GetMultipleKeys returns the values for the multiple keys. -func (state *State) GetMultipleKeys(chaincodeID string, keys []string, committed bool) ([][]byte, error) { - var values [][]byte - for _, k := range keys { - v, err := state.Get(chaincodeID, k, committed) - if err != nil { - return nil, err - } - values = append(values, v) - } - return values, nil -} - -// SetMultipleKeys sets the values for the multiple keys. -func (state *State) SetMultipleKeys(chaincodeID string, kvs map[string][]byte) error { - for k, v := range kvs { - err := state.Set(chaincodeID, k, v) - if err != nil { - return err - } - } - return nil -} - -// GetHash computes new state hash if the stateDelta is to be applied. 
-// Recomputes only if stateDelta has changed after most recent call to this function -func (state *State) GetHash() ([]byte, error) { - logger.Debug("Enter - GetHash()") - if state.updateStateImpl { - logger.Debug("updating stateImpl with working-set") - state.stateImpl.PrepareWorkingSet(state.stateDelta) - state.updateStateImpl = false - } - hash, err := state.stateImpl.ComputeCryptoHash() - if err != nil { - return nil, err - } - logger.Debug("Exit - GetHash()") - return hash, nil -} - -// GetTxStateDeltaHash return the hash of the StateDelta -func (state *State) GetTxStateDeltaHash() map[string][]byte { - return state.txStateDeltaHash -} - -// ClearInMemoryChanges remove from memory all the changes to state -func (state *State) ClearInMemoryChanges(changesPersisted bool) { - state.stateDelta = statemgmt.NewStateDelta() - state.txStateDeltaHash = make(map[string][]byte) - state.stateImpl.ClearWorkingSet(changesPersisted) -} - -// getStateDelta get changes in state after most recent call to method clearInMemoryChanges -func (state *State) getStateDelta() *statemgmt.StateDelta { - return state.stateDelta -} - -// GetSnapshot returns a snapshot of the global state for the current block. stateSnapshot.Release() -// must be called once you are done. -func (state *State) GetSnapshot(blockNumber uint64, dbSnapshot *gorocksdb.Snapshot) (*StateSnapshot, error) { - return newStateSnapshot(blockNumber, dbSnapshot) -} - -// FetchStateDeltaFromDB fetches the StateDelta corrsponding to given blockNumber -func (state *State) FetchStateDeltaFromDB(blockNumber uint64) (*statemgmt.StateDelta, error) { - stateDeltaBytes, err := db.GetDBHandle().GetFromStateDeltaCF(encodeStateDeltaKey(blockNumber)) - if err != nil { - return nil, err - } - if stateDeltaBytes == nil { - return nil, nil - } - stateDelta := statemgmt.NewStateDelta() - stateDelta.Unmarshal(stateDeltaBytes) - return stateDelta, nil -} - -// AddChangesForPersistence adds key-value pairs to writeBatch -func (state *State) AddChangesForPersistence(blockNumber uint64, writeBatch *gorocksdb.WriteBatch) { - logger.Debug("state.addChangesForPersistence()...start") - if state.updateStateImpl { - state.stateImpl.PrepareWorkingSet(state.stateDelta) - state.updateStateImpl = false - } - state.stateImpl.AddChangesForPersistence(writeBatch) - - serializedStateDelta := state.stateDelta.Marshal() - cf := db.GetDBHandle().StateDeltaCF - logger.Debugf("Adding state-delta corresponding to block number[%d]", blockNumber) - writeBatch.PutCF(cf, encodeStateDeltaKey(blockNumber), serializedStateDelta) - if blockNumber >= state.historyStateDeltaSize { - blockNumberToDelete := blockNumber - state.historyStateDeltaSize - logger.Debugf("Deleting state-delta corresponding to block number[%d]", blockNumberToDelete) - writeBatch.DeleteCF(cf, encodeStateDeltaKey(blockNumberToDelete)) - } else { - logger.Debugf("Not deleting previous state-delta. Block number [%d] is smaller than historyStateDeltaSize [%d]", - blockNumber, state.historyStateDeltaSize) - } - logger.Debug("state.addChangesForPersistence()...finished") -} - -// ApplyStateDelta applies already prepared stateDelta to the existing state. -// This is an in memory change only. state.CommitStateDelta must be used to -// commit the state to the DB. This method is to be used in state transfer. -func (state *State) ApplyStateDelta(delta *statemgmt.StateDelta) { - state.stateDelta = delta - state.updateStateImpl = true -} - -// CommitStateDelta commits the changes from state.ApplyStateDelta to the -// DB. 
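Taken together, the State methods above formed a small transaction-scoped API. The sketch below shows the removed call sequence TxBegin -> Set/Delete -> TxFinish -> GetHash; it compiles only against the tree before this change, and the applyOneTx helper plus its chaincode and key values are illustrative.

package example

import (
	"github.com/hyperledger/fabric/core/ledger/statemgmt/state"
)

// applyOneTx is an illustrative helper (not part of the original code) showing the
// removed transaction lifecycle: TxBegin -> Set/Delete -> TxFinish -> GetHash.
func applyOneTx(s *state.State) ([]byte, error) {
	s.TxBegin("tx1")
	if err := s.Set("mycc", "key1", []byte("value1")); err != nil {
		s.TxFinish("tx1", false)
		return nil, err
	}
	if err := s.Delete("mycc", "key2"); err != nil {
		s.TxFinish("tx1", false)
		return nil, err
	}
	s.TxFinish("tx1", true)
	// GetHash recomputes the state hash only if the in-memory delta has changed.
	return s.GetHash()
}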
-func (state *State) CommitStateDelta() error { - if state.updateStateImpl { - state.stateImpl.PrepareWorkingSet(state.stateDelta) - state.updateStateImpl = false - } - - writeBatch := gorocksdb.NewWriteBatch() - defer writeBatch.Destroy() - state.stateImpl.AddChangesForPersistence(writeBatch) - opt := gorocksdb.NewDefaultWriteOptions() - defer opt.Destroy() - return db.GetDBHandle().DB.Write(opt, writeBatch) -} - -// DeleteState deletes ALL state keys/values from the DB. This is generally -// only used during state synchronization when creating a new state from -// a snapshot. -func (state *State) DeleteState() error { - state.ClearInMemoryChanges(false) - err := db.GetDBHandle().DeleteState() - if err != nil { - logger.Errorf("Error deleting state: %s", err) - } - return err -} - -func encodeStateDeltaKey(blockNumber uint64) []byte { - return encodeUint64(blockNumber) -} - -func decodeStateDeltaKey(dbkey []byte) uint64 { - return decodeToUint64(dbkey) -} - -func encodeUint64(number uint64) []byte { - bytes := make([]byte, 8) - binary.BigEndian.PutUint64(bytes, number) - return bytes -} - -func decodeToUint64(bytes []byte) uint64 { - return binary.BigEndian.Uint64(bytes) -} diff --git a/core/ledger/statemgmt/state/state_snapshot.go b/core/ledger/statemgmt/state/state_snapshot.go deleted file mode 100644 index 5a9839650b9..00000000000 --- a/core/ledger/statemgmt/state/state_snapshot.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// StateSnapshot encapsulates StateSnapshotIterator given by actual state implementation and the db snapshot -type StateSnapshot struct { - blockNumber uint64 - stateImplItr statemgmt.StateSnapshotIterator - dbSnapshot *gorocksdb.Snapshot -} - -// newStateSnapshot creates a new snapshot of the global state for the current block. -func newStateSnapshot(blockNumber uint64, dbSnapshot *gorocksdb.Snapshot) (*StateSnapshot, error) { - itr, err := stateImpl.GetStateSnapshotIterator(dbSnapshot) - if err != nil { - return nil, err - } - snapshot := &StateSnapshot{blockNumber, itr, dbSnapshot} - return snapshot, nil -} - -// Release the snapshot. This MUST be called when you are done with this resouce. 
-func (ss *StateSnapshot) Release() { - ss.stateImplItr.Close() - ss.dbSnapshot.Release() -} - -// Next moves the iterator to the next key/value pair in the state -func (ss *StateSnapshot) Next() bool { - return ss.stateImplItr.Next() -} - -// GetRawKeyValue returns the raw bytes for the key and value at the current iterator position -func (ss *StateSnapshot) GetRawKeyValue() ([]byte, []byte) { - return ss.stateImplItr.GetRawKeyValue() -} - -// GetBlockNumber returns the blocknumber associated with this global state snapshot -func (ss *StateSnapshot) GetBlockNumber() uint64 { - return ss.blockNumber -} diff --git a/core/ledger/statemgmt/state/test.yaml b/core/ledger/statemgmt/state/test.yaml deleted file mode 100644 index a3d6accdef6..00000000000 --- a/core/ledger/statemgmt/state/test.yaml +++ /dev/null @@ -1,22 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/ledge/statemgmt/state/testdb - -ledger: - - state: - - # Control the number state deltas that are maintained. This takes additional - # disk space, but allow the state to be rolled backwards and forwards - # without the need to replay transactions. - deltaHistorySize: 500 - dataStructure: - name: buckettree - configs: - numBuckets: 10009 - maxGroupingAtEachLevel: 10 diff --git a/core/ledger/statemgmt/state_delta.go b/core/ledger/statemgmt/state_delta.go deleted file mode 100644 index dc9a84ea660..00000000000 --- a/core/ledger/statemgmt/state_delta.go +++ /dev/null @@ -1,366 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package statemgmt - -import ( - "bytes" - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/util" -) - -// StateDelta holds the changes to existing state. This struct is used for holding the uncommitted changes during execution of a tx-batch -// Also, to be used for transferring the state to another peer in chunks -type StateDelta struct { - ChaincodeStateDeltas map[string]*ChaincodeStateDelta - // RollBackwards allows one to contol whether this delta will roll the state - // forwards or backwards. - RollBackwards bool -} - -// NewStateDelta constructs an empty StateDelta struct -func NewStateDelta() *StateDelta { - return &StateDelta{make(map[string]*ChaincodeStateDelta), false} -} - -// Get get the state from delta if exists -func (stateDelta *StateDelta) Get(chaincodeID string, key string) *UpdatedValue { - // TODO Cache? 
- chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID] - if ok { - return chaincodeStateDelta.get(key) - } - return nil -} - -// Set sets state value for a key -func (stateDelta *StateDelta) Set(chaincodeID string, key string, value, previousValue []byte) { - chaincodeStateDelta := stateDelta.getOrCreateChaincodeStateDelta(chaincodeID) - chaincodeStateDelta.set(key, value, previousValue) - return -} - -// Delete deletes a key from the state -func (stateDelta *StateDelta) Delete(chaincodeID string, key string, previousValue []byte) { - chaincodeStateDelta := stateDelta.getOrCreateChaincodeStateDelta(chaincodeID) - chaincodeStateDelta.remove(key, previousValue) - return -} - -// IsUpdatedValueSet returns true if a update value is already set for -// the given chaincode ID and key. -func (stateDelta *StateDelta) IsUpdatedValueSet(chaincodeID, key string) bool { - chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID] - if !ok { - return false - } - if _, ok := chaincodeStateDelta.UpdatedKVs[key]; ok { - return true - } - return false -} - -// ApplyChanges merges another delta - if a key is present in both, the value of the existing key is overwritten -func (stateDelta *StateDelta) ApplyChanges(anotherStateDelta *StateDelta) { - for chaincodeID, chaincodeStateDelta := range anotherStateDelta.ChaincodeStateDeltas { - existingChaincodeStateDelta, existingChaincode := stateDelta.ChaincodeStateDeltas[chaincodeID] - for key, valueHolder := range chaincodeStateDelta.UpdatedKVs { - var previousValue []byte - if existingChaincode { - existingUpdateValue, existingUpdate := existingChaincodeStateDelta.UpdatedKVs[key] - if existingUpdate { - // The existing state delta already has an updated value for this key. - previousValue = existingUpdateValue.PreviousValue - } else { - // Use the previous value set in the new state delta - previousValue = valueHolder.PreviousValue - } - } else { - // Use the previous value set in the new state delta - previousValue = valueHolder.PreviousValue - } - - if valueHolder.IsDeleted() { - stateDelta.Delete(chaincodeID, key, previousValue) - } else { - stateDelta.Set(chaincodeID, key, valueHolder.Value, previousValue) - } - } - } -} - -// IsEmpty checks whether StateDelta contains any data -func (stateDelta *StateDelta) IsEmpty() bool { - return len(stateDelta.ChaincodeStateDeltas) == 0 -} - -// GetUpdatedChaincodeIds return the chaincodeIDs that are prepsent in the delta -// If sorted is true, the method return chaincodeIDs in lexicographical sorted order -func (stateDelta *StateDelta) GetUpdatedChaincodeIds(sorted bool) []string { - updatedChaincodeIds := make([]string, len(stateDelta.ChaincodeStateDeltas)) - i := 0 - for k := range stateDelta.ChaincodeStateDeltas { - updatedChaincodeIds[i] = k - i++ - } - if sorted { - sort.Strings(updatedChaincodeIds) - } - return updatedChaincodeIds -} - -// GetUpdates returns changes associated with given chaincodeId -func (stateDelta *StateDelta) GetUpdates(chaincodeID string) map[string]*UpdatedValue { - chaincodeStateDelta := stateDelta.ChaincodeStateDeltas[chaincodeID] - if chaincodeStateDelta == nil { - return nil - } - return chaincodeStateDelta.UpdatedKVs -} - -func (stateDelta *StateDelta) getOrCreateChaincodeStateDelta(chaincodeID string) *ChaincodeStateDelta { - chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID] - if !ok { - chaincodeStateDelta = newChaincodeStateDelta(chaincodeID) - stateDelta.ChaincodeStateDeltas[chaincodeID] = chaincodeStateDelta - } - return 
chaincodeStateDelta -} - -// ComputeCryptoHash computes crypto-hash for the data held -// returns nil if no data is present -func (stateDelta *StateDelta) ComputeCryptoHash() []byte { - if stateDelta.IsEmpty() { - return nil - } - var buffer bytes.Buffer - sortedChaincodeIds := stateDelta.GetUpdatedChaincodeIds(true) - for _, chaincodeID := range sortedChaincodeIds { - buffer.WriteString(chaincodeID) - chaincodeStateDelta := stateDelta.ChaincodeStateDeltas[chaincodeID] - sortedKeys := chaincodeStateDelta.getSortedKeys() - for _, key := range sortedKeys { - buffer.WriteString(key) - updatedValue := chaincodeStateDelta.get(key) - if !updatedValue.IsDeleted() { - buffer.Write(updatedValue.Value) - } - } - } - hashingContent := buffer.Bytes() - logger.Debugf("computing hash on %#v", hashingContent) - return util.ComputeCryptoHash(hashingContent) -} - -//ChaincodeStateDelta maintains state for a chaincode -type ChaincodeStateDelta struct { - ChaincodeID string - UpdatedKVs map[string]*UpdatedValue -} - -func newChaincodeStateDelta(chaincodeID string) *ChaincodeStateDelta { - return &ChaincodeStateDelta{chaincodeID, make(map[string]*UpdatedValue)} -} - -func (chaincodeStateDelta *ChaincodeStateDelta) get(key string) *UpdatedValue { - // TODO Cache? - return chaincodeStateDelta.UpdatedKVs[key] -} - -func (chaincodeStateDelta *ChaincodeStateDelta) set(key string, updatedValue, previousValue []byte) { - updatedKV, ok := chaincodeStateDelta.UpdatedKVs[key] - if ok { - // Key already exists, just set the updated value - updatedKV.Value = updatedValue - } else { - // New key. Create a new entry in the map - chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{updatedValue, previousValue} - } -} - -func (chaincodeStateDelta *ChaincodeStateDelta) remove(key string, previousValue []byte) { - updatedKV, ok := chaincodeStateDelta.UpdatedKVs[key] - if ok { - // Key already exists, just set the value - updatedKV.Value = nil - } else { - // New key. Create a new entry in the map - chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{nil, previousValue} - } -} - -func (chaincodeStateDelta *ChaincodeStateDelta) hasChanges() bool { - return len(chaincodeStateDelta.UpdatedKVs) > 0 -} - -func (chaincodeStateDelta *ChaincodeStateDelta) getSortedKeys() []string { - updatedKeys := []string{} - for k := range chaincodeStateDelta.UpdatedKVs { - updatedKeys = append(updatedKeys, k) - } - sort.Strings(updatedKeys) - logger.Debugf("Sorted keys = %#v", updatedKeys) - return updatedKeys -} - -// UpdatedValue holds the value for a key -type UpdatedValue struct { - Value []byte - PreviousValue []byte -} - -// IsDeleted checks whether the key was deleted -func (updatedValue *UpdatedValue) IsDeleted() bool { - return updatedValue.Value == nil -} - -// GetValue returns the value -func (updatedValue *UpdatedValue) GetValue() []byte { - return updatedValue.Value -} - -// GetPreviousValue returns the previous value -func (updatedValue *UpdatedValue) GetPreviousValue() []byte { - return updatedValue.PreviousValue -} - -// marshalling / Unmarshalling code -// We need to revisit the following when we define proto messages -// for state related structures for transporting. 
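Before moving on to its wire format, it is worth seeing how the removed StateDelta was used as a plain in-memory write set. A pre-change-tree sketch (chaincode and key names illustrative):

package example

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/statemgmt"
)

func mergeDeltas() {
	txDelta := statemgmt.NewStateDelta()
	txDelta.Set("mycc", "key1", []byte("value1"), nil) // previous value unknown here, pass nil
	txDelta.Delete("mycc", "key2", nil)

	batchDelta := statemgmt.NewStateDelta()
	batchDelta.ApplyChanges(txDelta) // values from the merged-in delta overwrite existing ones

	// The hash is computed over chaincode IDs and keys in sorted order, so it is
	// independent of map iteration order; nil means the delta is empty.
	fmt.Printf("delta hash: %x\n", batchDelta.ComputeCryptoHash())
}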
May be we can -// completely get rid of custom marshalling / Unmarshalling of a state delta - -// Marshal serializes the StateDelta -func (stateDelta *StateDelta) Marshal() (b []byte) { - buffer := proto.NewBuffer([]byte{}) - err := buffer.EncodeVarint(uint64(len(stateDelta.ChaincodeStateDeltas))) - if err != nil { - // in protobuf code the error return is always nil - panic(fmt.Errorf("This error should not occure: %s", err)) - } - for chaincodeID, chaincodeStateDelta := range stateDelta.ChaincodeStateDeltas { - buffer.EncodeStringBytes(chaincodeID) - chaincodeStateDelta.marshal(buffer) - } - b = buffer.Bytes() - return -} - -func (chaincodeStateDelta *ChaincodeStateDelta) marshal(buffer *proto.Buffer) { - err := buffer.EncodeVarint(uint64(len(chaincodeStateDelta.UpdatedKVs))) - if err != nil { - panic(fmt.Errorf("This error should not occur: %s", err)) - } - for key, valueHolder := range chaincodeStateDelta.UpdatedKVs { - err = buffer.EncodeStringBytes(key) - if err != nil { - panic(fmt.Errorf("This error should not occur: %s", err)) - } - chaincodeStateDelta.marshalValueWithMarker(buffer, valueHolder.Value) - chaincodeStateDelta.marshalValueWithMarker(buffer, valueHolder.PreviousValue) - } - return -} - -func (chaincodeStateDelta *ChaincodeStateDelta) marshalValueWithMarker(buffer *proto.Buffer, value []byte) { - if value == nil { - // Just add a marker that the value is nil - err := buffer.EncodeVarint(uint64(0)) - if err != nil { - panic(fmt.Errorf("This error should not occur: %s", err)) - } - return - } - err := buffer.EncodeVarint(uint64(1)) - if err != nil { - panic(fmt.Errorf("This error should not occur: %s", err)) - } - // If the value happen to be an empty byte array, it would appear as a nil during - // deserialization - see method 'unmarshalValueWithMarker' - err = buffer.EncodeRawBytes(value) - if err != nil { - panic(fmt.Errorf("This error should not occur: %s", err)) - } -} - -// Unmarshal deserializes StateDelta -func (stateDelta *StateDelta) Unmarshal(bytes []byte) error { - buffer := proto.NewBuffer(bytes) - size, err := buffer.DecodeVarint() - if err != nil { - return fmt.Errorf("Error unmarashaling size: %s", err) - } - stateDelta.ChaincodeStateDeltas = make(map[string]*ChaincodeStateDelta, size) - for i := uint64(0); i < size; i++ { - chaincodeID, err := buffer.DecodeStringBytes() - if err != nil { - return fmt.Errorf("Error unmarshaling chaincodeID : %s", err) - } - chaincodeStateDelta := newChaincodeStateDelta(chaincodeID) - err = chaincodeStateDelta.unmarshal(buffer) - if err != nil { - return fmt.Errorf("Error unmarshalling chaincodeStateDelta : %s", err) - } - stateDelta.ChaincodeStateDeltas[chaincodeID] = chaincodeStateDelta - } - - return nil -} - -func (chaincodeStateDelta *ChaincodeStateDelta) unmarshal(buffer *proto.Buffer) error { - size, err := buffer.DecodeVarint() - if err != nil { - return fmt.Errorf("Error unmarshaling state delta: %s", err) - } - chaincodeStateDelta.UpdatedKVs = make(map[string]*UpdatedValue, size) - for i := uint64(0); i < size; i++ { - key, err := buffer.DecodeStringBytes() - if err != nil { - return fmt.Errorf("Error unmarshaling state delta : %s", err) - } - value, err := chaincodeStateDelta.unmarshalValueWithMarker(buffer) - if err != nil { - return fmt.Errorf("Error unmarshaling state delta : %s", err) - } - previousValue, err := chaincodeStateDelta.unmarshalValueWithMarker(buffer) - if err != nil { - return fmt.Errorf("Error unmarshaling state delta : %s", err) - } - chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{value, 
previousValue} - } - return nil -} - -func (chaincodeStateDelta *ChaincodeStateDelta) unmarshalValueWithMarker(buffer *proto.Buffer) ([]byte, error) { - valueMarker, err := buffer.DecodeVarint() - if err != nil { - return nil, fmt.Errorf("Error unmarshaling state delta : %s", err) - } - if valueMarker == 0 { - return nil, nil - } - value, err := buffer.DecodeRawBytes(false) - if err != nil { - return nil, fmt.Errorf("Error unmarhsaling state delta : %s", err) - } - // protobuff makes an empty []byte into a nil. So, assigning an empty byte array explicitly - if value == nil { - value = []byte{} - } - return value, nil -} diff --git a/core/ledger/statemgmt/state_delta_iterator.go b/core/ledger/statemgmt/state_delta_iterator.go deleted file mode 100644 index e6159f18baf..00000000000 --- a/core/ledger/statemgmt/state_delta_iterator.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package statemgmt - -// StateDeltaIterator - An iterator implementation over state-delta -type StateDeltaIterator struct { - updates map[string]*UpdatedValue - relevantKeys []string - currentKeyIndex int - done bool -} - -// NewStateDeltaRangeScanIterator - return an iterator for performing a range scan over a state-delta object -func NewStateDeltaRangeScanIterator(delta *StateDelta, chaincodeID string, startKey string, endKey string) *StateDeltaIterator { - updates := delta.GetUpdates(chaincodeID) - return &StateDeltaIterator{updates, retrieveRelevantKeys(updates, startKey, endKey), -1, false} -} - -func retrieveRelevantKeys(updates map[string]*UpdatedValue, startKey string, endKey string) []string { - relevantKeys := []string{} - if updates == nil { - return relevantKeys - } - for k, v := range updates { - if k >= startKey && (endKey == "" || k <= endKey) && !v.IsDeleted() { - relevantKeys = append(relevantKeys, k) - } - } - return relevantKeys -} - -// Next - see interface 'RangeScanIterator' for details -func (itr *StateDeltaIterator) Next() bool { - itr.currentKeyIndex++ - if itr.currentKeyIndex < len(itr.relevantKeys) { - return true - } - itr.currentKeyIndex-- - itr.done = true - return false -} - -// GetKeyValue - see interface 'RangeScanIterator' for details -func (itr *StateDeltaIterator) GetKeyValue() (string, []byte) { - if itr.done { - logger.Warning("Iterator used after it has been exhausted. 
Last retrieved value will be returned") - } - key := itr.relevantKeys[itr.currentKeyIndex] - value := itr.updates[key].GetValue() - return key, value -} - -// Close - see interface 'RangeScanIterator' for details -func (itr *StateDeltaIterator) Close() { -} - -// ContainsKey - checks wether the given key is present in the state-delta -func (itr *StateDeltaIterator) ContainsKey(key string) bool { - _, ok := itr.updates[key] - return ok -} diff --git a/core/ledger/statemgmt/test_exports.go b/core/ledger/statemgmt/test_exports.go deleted file mode 100644 index 46951ebcdc3..00000000000 --- a/core/ledger/statemgmt/test_exports.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package statemgmt - -import ( - "fmt" - "math/rand" - "strconv" - "testing" - "time" - - "github.com/hyperledger/fabric/core/ledger/testutil" -) - -// AssertIteratorContains - tests wether the iterator (itr) contains expected results (provided in map) -func AssertIteratorContains(t *testing.T, itr RangeScanIterator, expected map[string][]byte) { - count := 0 - actual := make(map[string][]byte) - for itr.Next() { - count++ - k, v := itr.GetKeyValue() - actual[k] = v - } - - t.Logf("Results from iterator: %s", actual) - testutil.AssertEquals(t, count, len(expected)) - for k, v := range expected { - testutil.AssertEquals(t, actual[k], v) - } -} - -// ConstructRandomStateDelta creates a random state delta for testing -func ConstructRandomStateDelta( - t testing.TB, - chaincodeIDPrefix string, - numChaincodes int, - maxKeySuffix int, - numKeysToInsert int, - kvSize int) *StateDelta { - delta := NewStateDelta() - s2 := rand.NewSource(time.Now().UnixNano()) - r2 := rand.New(s2) - - for i := 0; i < numKeysToInsert; i++ { - chaincodeID := chaincodeIDPrefix + "_" + strconv.Itoa(r2.Intn(numChaincodes)) - key := "key_" + strconv.Itoa(r2.Intn(maxKeySuffix)) - valueSize := kvSize - len(key) - if valueSize < 1 { - panic(fmt.Errorf("valueSize cannot be less than one. ValueSize=%d", valueSize)) - } - value := testutil.ConstructRandomBytes(t, valueSize) - delta.Set(chaincodeID, key, value, nil) - } - - for _, chaincodeDelta := range delta.ChaincodeStateDeltas { - sortedKeys := chaincodeDelta.getSortedKeys() - smallestKey := sortedKeys[0] - largestKey := sortedKeys[len(sortedKeys)-1] - t.Logf("chaincode=%s, numKeys=%d, smallestKey=%s, largestKey=%s", chaincodeDelta.ChaincodeID, len(sortedKeys), smallestKey, largestKey) - } - return delta -} diff --git a/core/ledger/statemgmt/trie/byteTrieKey.go b/core/ledger/statemgmt/trie/byteTrieKey.go deleted file mode 100644 index e004a34aa7d..00000000000 --- a/core/ledger/statemgmt/trie/byteTrieKey.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
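The StateDeltaIterator above is what the composite range-scan iterator layers over the committed state, with ContainsKey letting earlier layers shadow keys from later ones. A pre-change-tree sketch of scanning a delta directly (values illustrative):

package example

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/statemgmt"
)

func scanDelta() {
	delta := statemgmt.NewStateDelta()
	delta.Set("mycc", "key_1", []byte("v1"), nil)
	delta.Set("mycc", "key_3", []byte("v3"), nil)

	// Keys outside [startKey, endKey] and deleted keys are skipped by the iterator.
	itr := statemgmt.NewStateDeltaRangeScanIterator(delta, "mycc", "key_1", "key_9")
	defer itr.Close()
	for itr.Next() {
		k, v := itr.GetKeyValue()
		fmt.Printf("%s=%s\n", k, string(v))
	}
	// ContainsKey is what lets preceding iterators shadow keys in later ones.
	fmt.Println(itr.ContainsKey("key_3"))
}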
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import ( - "encoding/binary" - "fmt" - "math" -) - -var numBytesAtEachLevel = 1 - -type byteTrieKeyEncoder struct { -} - -func newByteTrieKeyEncoder() trieKeyEncoder { - return &byteTrieKeyEncoder{} -} - -func (encoder *byteTrieKeyEncoder) newTrieKey(originalBytes []byte) trieKeyInterface { - len := len(originalBytes) - remainingBytes := len % numBytesAtEachLevel - bytesToAppend := 0 - if remainingBytes != 0 { - bytesToAppend = numBytesAtEachLevel - remainingBytes - } - for i := 0; i < bytesToAppend; i++ { - originalBytes = append(originalBytes, byte(0)) - } - return byteTrieKey(originalBytes) -} - -func (encoder *byteTrieKeyEncoder) decodeTrieKeyBytes(encodedBytes []byte) []byte { - return encodedBytes -} - -func (encoder *byteTrieKeyEncoder) getMaxTrieWidth() int { - return int(math.Pow(2, float64(8*numBytesAtEachLevel))) -} - -type byteTrieKey string - -func (key byteTrieKey) getLevel() int { - return len(key) / numBytesAtEachLevel -} - -func (key byteTrieKey) getParentTrieKey() trieKeyInterface { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return key[:len(key)-numBytesAtEachLevel] -} - -func (key byteTrieKey) getIndexInParent() int { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root should not be asked for")) - } - indexBytes := []byte{} - for i := 0; i < 8-numBytesAtEachLevel; i++ { - indexBytes = append(indexBytes, byte(0)) - } - indexBytes = append(indexBytes, []byte(key[len(key)-numBytesAtEachLevel:])...) - return int(binary.BigEndian.Uint64(indexBytes)) -} - -func (key byteTrieKey) getEncodedBytes() []byte { - return []byte(key) -} - -func (key byteTrieKey) isRootKey() bool { - return len(key) == 0 -} diff --git a/core/ledger/statemgmt/trie/hexTrieKey.go b/core/ledger/statemgmt/trie/hexTrieKey.go deleted file mode 100644 index af7d2760421..00000000000 --- a/core/ledger/statemgmt/trie/hexTrieKey.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package trie - -import ( - "encoding/hex" - "fmt" -) - -var charIndexMap = map[hexTrieKey]int{ - "0": 0, - "1": 1, - "2": 2, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7, - "8": 8, - "9": 9, - "a": 10, - "b": 11, - "c": 12, - "d": 13, - "e": 14, - "f": 15, -} - -type hexTrieKeyEncoder struct { -} - -func newHexTrieKeyEncoder() trieKeyEncoder { - return &hexTrieKeyEncoder{} -} - -func (encoder *hexTrieKeyEncoder) newTrieKey(originalBytes []byte) trieKeyInterface { - return hexTrieKey(hex.EncodeToString(originalBytes)) -} - -func (encoder *hexTrieKeyEncoder) decodeTrieKeyBytes(encodedBytes []byte) []byte { - originalBytes, err := hex.DecodeString(string(encodedBytes)) - if err != nil { - panic(fmt.Errorf("Invalid input: input bytes=[%x], error:%s", encodedBytes, err)) - } - return originalBytes -} - -func (encoder *hexTrieKeyEncoder) getMaxTrieWidth() int { - return len(charIndexMap) -} - -type hexTrieKey string - -func (key hexTrieKey) getLevel() int { - return len(key) -} - -func (key hexTrieKey) getParentTrieKey() trieKeyInterface { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return key[:len(key)-1] -} - -func (key hexTrieKey) getIndexInParent() int { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return charIndexMap[key[len(key)-1:]] -} - -func (key hexTrieKey) getEncodedBytes() []byte { - return []byte(key) -} - -func (key hexTrieKey) isRootKey() bool { - return len(key) == 0 -} diff --git a/core/ledger/statemgmt/trie/range_scan_iterator.go b/core/ledger/statemgmt/trie/range_scan_iterator.go deleted file mode 100644 index 1fb4a91db8a..00000000000 --- a/core/ledger/statemgmt/trie/range_scan_iterator.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// RangeScanIterator implements the interface 'statemgmt.RangeScanIterator' -type RangeScanIterator struct { - dbItr *gorocksdb.Iterator - chaincodeID string - endKey string - currentKey string - currentValue []byte - done bool -} - -func newRangeScanIterator(chaincodeID string, startKey string, endKey string) (*RangeScanIterator, error) { - dbItr := db.GetDBHandle().GetStateCFIterator() - encodedStartKey := newTrieKey(chaincodeID, startKey).getEncodedBytes() - dbItr.Seek(encodedStartKey) - return &RangeScanIterator{dbItr, chaincodeID, endKey, "", nil, false}, nil -} - -// Next - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) Next() bool { - if itr.done { - return false - } - for ; itr.dbItr.Valid(); itr.dbItr.Next() { - - // making a copy of key-value bytes because, underlying key bytes are reused by itr. - // no need to free slices as iterator frees memory when closed. 
- trieKeyBytes := statemgmt.Copy(itr.dbItr.Key().Data()) - trieNodeBytes := statemgmt.Copy(itr.dbItr.Value().Data()) - value := unmarshalTrieNodeValue(trieNodeBytes) - if value == nil { - continue - } - - // found an actual key - currentCompositeKey := trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes)) - currentChaincodeID, currentKey := statemgmt.DecodeCompositeKey(currentCompositeKey) - if currentChaincodeID == itr.chaincodeID && (itr.endKey == "" || currentKey <= itr.endKey) { - itr.currentKey = currentKey - itr.currentValue = value - itr.dbItr.Next() - return true - } - - // retrieved all the keys in the given range - break - } - itr.done = true - return false -} - -// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) GetKeyValue() (string, []byte) { - return itr.currentKey, itr.currentValue -} - -// Close - see interface 'statemgmt.RangeScanIterator' for details -func (itr *RangeScanIterator) Close() { - itr.dbItr.Close() -} diff --git a/core/ledger/statemgmt/trie/snapshot_iterator.go b/core/ledger/statemgmt/trie/snapshot_iterator.go deleted file mode 100644 index b9f182f7070..00000000000 --- a/core/ledger/statemgmt/trie/snapshot_iterator.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import ( - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/tecbot/gorocksdb" -) - -// StateSnapshotIterator implements the interface 'statemgmt.StateSnapshotIterator' -type StateSnapshotIterator struct { - dbItr *gorocksdb.Iterator - currentKey []byte - currentValue []byte -} - -func newStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (*StateSnapshotIterator, error) { - dbItr := db.GetDBHandle().GetStateCFSnapshotIterator(snapshot) - dbItr.SeekToFirst() - // skip the root key, because, the value test in Next method is misleading for root key as the value field - dbItr.Next() - return &StateSnapshotIterator{dbItr, nil, nil}, nil -} - -// Next - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) Next() bool { - var available bool - for ; snapshotItr.dbItr.Valid(); snapshotItr.dbItr.Next() { - - // making a copy of key-value bytes because, underlying key bytes are reused by itr. - // no need to free slices as iterator frees memory when closed. 
- trieKeyBytes := statemgmt.Copy(snapshotItr.dbItr.Key().Data()) - trieNodeBytes := statemgmt.Copy(snapshotItr.dbItr.Value().Data()) - value := unmarshalTrieNodeValue(trieNodeBytes) - if value != nil { - snapshotItr.currentKey = trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes)) - snapshotItr.currentValue = value - available = true - snapshotItr.dbItr.Next() - break - } - } - return available -} - -// GetRawKeyValue - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) GetRawKeyValue() ([]byte, []byte) { - return snapshotItr.currentKey, snapshotItr.currentValue -} - -// Close - see interface 'statemgmt.StateSnapshotIterator' for details -func (snapshotItr *StateSnapshotIterator) Close() { - snapshotItr.dbItr.Close() -} diff --git a/core/ledger/statemgmt/trie/state_trie.go b/core/ledger/statemgmt/trie/state_trie.go deleted file mode 100644 index e73d293952b..00000000000 --- a/core/ledger/statemgmt/trie/state_trie.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/db" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/op/go-logging" - "github.com/tecbot/gorocksdb" -) - -var stateTrieLogger = logging.MustGetLogger("stateTrie") -var logHashOfEveryNode = false - -// StateTrie defines the trie for the state, a merkle tree where keys -// and values are stored for fast hash computation. 
-type StateTrie struct { - trieDelta *trieDelta - persistedStateHash []byte - lastComputedCryptoHash []byte - recomputeCryptoHash bool -} - -// NewStateImpl contructs a new empty StateTrie -func NewStateImpl() *StateTrie { - return &StateTrie{} -} - -// Initialize the state trie with the root key -func (stateTrie *StateTrie) Initialize(configs map[string]interface{}) error { - rootNode, err := fetchTrieNodeFromDB(rootTrieKey) - if err != nil { - panic(fmt.Errorf("Error in fetching root node from DB while initializing state trie: %s", err)) - } - if rootNode != nil { - stateTrie.persistedStateHash = rootNode.computeCryptoHash() - stateTrie.lastComputedCryptoHash = stateTrie.persistedStateHash - } - return nil -} - -// Get the value for a given chaincode ID and key -func (stateTrie *StateTrie) Get(chaincodeID string, key string) ([]byte, error) { - trieNode, err := fetchTrieNodeFromDB(newTrieKey(chaincodeID, key)) - if err != nil { - return nil, err - } - if trieNode == nil { - return nil, nil - } - return trieNode.value, nil -} - -// PrepareWorkingSet creates the start of a new delta -func (stateTrie *StateTrie) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error { - stateTrie.trieDelta = newTrieDelta(stateDelta) - stateTrie.recomputeCryptoHash = true - return nil -} - -// ClearWorkingSet clears the existing delta -func (stateTrie *StateTrie) ClearWorkingSet(changesPersisted bool) { - stateTrie.trieDelta = nil - stateTrie.recomputeCryptoHash = false - - if changesPersisted { - stateTrie.persistedStateHash = stateTrie.lastComputedCryptoHash - } else { - stateTrie.lastComputedCryptoHash = stateTrie.persistedStateHash - } -} - -// ComputeCryptoHash returns the hash of the current state trie -func (stateTrie *StateTrie) ComputeCryptoHash() ([]byte, error) { - stateTrieLogger.Debug("Enter - ComputeCryptoHash()") - if !stateTrie.recomputeCryptoHash { - stateTrieLogger.Debug("No change since last time crypto-hash was computed. 
Returning result from last computation") - return stateTrie.lastComputedCryptoHash, nil - } - lowestLevel := stateTrie.trieDelta.getLowestLevel() - stateTrieLogger.Debugf("Lowest level in trieDelta = [%d]", lowestLevel) - for level := lowestLevel; level > 0; level-- { - changedNodes := stateTrie.trieDelta.deltaMap[level] - for _, changedNode := range changedNodes { - err := stateTrie.processChangedNode(changedNode) - if err != nil { - return nil, err - } - } - } - trieRootNode := stateTrie.trieDelta.getTrieRootNode() - if trieRootNode == nil { - return stateTrie.lastComputedCryptoHash, nil - } - stateTrie.lastComputedCryptoHash = trieRootNode.computeCryptoHash() - stateTrie.recomputeCryptoHash = false - hash := stateTrie.lastComputedCryptoHash - stateTrieLogger.Debug("Exit - ComputeCryptoHash()") - return hash, nil -} - -func (stateTrie *StateTrie) processChangedNode(changedNode *trieNode) error { - stateTrieLogger.Debugf("Enter - processChangedNode() for node [%s]", changedNode) - dbNode, err := fetchTrieNodeFromDB(changedNode.trieKey) - if err != nil { - return err - } - if dbNode != nil { - stateTrieLogger.Debugf("processChangedNode() - merging attributes from db node [%s]", dbNode) - changedNode.mergeMissingAttributesFrom(dbNode) - } - newCryptoHash := changedNode.computeCryptoHash() - parentNode := stateTrie.trieDelta.getParentOf(changedNode) - if parentNode == nil { - parentNode = newTrieNode(changedNode.getParentTrieKey(), nil, false) - stateTrie.trieDelta.addTrieNode(parentNode) - } - parentNode.setChildCryptoHash(changedNode.getIndexInParent(), newCryptoHash) - if logHashOfEveryNode { - stateTrieLogger.Debugf("Hash for changedNode[%s]", changedNode) - stateTrieLogger.Debugf("%#v", newCryptoHash) - } - stateTrieLogger.Debugf("Exit - processChangedNode() for node [%s]", changedNode) - return nil -} - -// AddChangesForPersistence commits current changes to the database -func (stateTrie *StateTrie) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error { - if stateTrie.recomputeCryptoHash { - _, err := stateTrie.ComputeCryptoHash() - if err != nil { - return err - } - } - - if stateTrie.trieDelta == nil { - stateTrieLogger.Info("trieDelta is nil. Not writing anything to DB") - return nil - } - - openchainDB := db.GetDBHandle() - lowestLevel := stateTrie.trieDelta.getLowestLevel() - for level := lowestLevel; level >= 0; level-- { - changedNodes := stateTrie.trieDelta.deltaMap[level] - for _, changedNode := range changedNodes { - if changedNode.markedForDeletion { - writeBatch.DeleteCF(openchainDB.StateCF, changedNode.trieKey.getEncodedBytes()) - continue - } - serializedContent, err := changedNode.marshal() - if err != nil { - return err - } - writeBatch.PutCF(openchainDB.StateCF, changedNode.trieKey.getEncodedBytes(), serializedContent) - } - } - stateTrieLogger.Debug("Added changes to DB") - return nil -} - -// PerfHintKeyChanged is currently a no-op. Can perform pre-fetching of relevant data from db here. -func (stateTrie *StateTrie) PerfHintKeyChanged(chaincodeID string, key string) { - // nothing for now. Can perform pre-fetching of relevant data from db here. 
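The StateTrie methods above implement the HashableState lifecycle removed earlier in this change: prepare the working set, compute the hash, stage the changes in a write batch, then clear the working set. A pre-change-tree sketch of that sequence (it assumes the peer's RocksDB handle is already open; the helper name and the idea of committing a single delta this way are illustrative):

package example

import (
	"github.com/hyperledger/fabric/core/ledger/statemgmt"
	"github.com/hyperledger/fabric/core/ledger/statemgmt/trie"
	"github.com/tecbot/gorocksdb"
)

func commitDeltaThroughTrie(delta *statemgmt.StateDelta) ([]byte, error) {
	impl := trie.NewStateImpl()
	if err := impl.Initialize(nil); err != nil {
		return nil, err
	}
	if err := impl.PrepareWorkingSet(delta); err != nil {
		return nil, err
	}
	hash, err := impl.ComputeCryptoHash()
	if err != nil {
		return nil, err
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	if err := impl.AddChangesForPersistence(writeBatch); err != nil {
		return nil, err
	}
	// The caller would hand writeBatch to the DB here, then mark the changes persisted.
	impl.ClearWorkingSet(true)
	return hash, nil
}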
-} - -// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState' -func (stateTrie *StateTrie) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) { - return newStateSnapshotIterator(snapshot) -} - -// GetRangeScanIterator returns an iterator for performing a range scan between the start and end keys -func (stateTrie *StateTrie) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) { - return newRangeScanIterator(chaincodeID, startKey, endKey) -} diff --git a/core/ledger/statemgmt/trie/test.yaml b/core/ledger/statemgmt/trie/test.yaml deleted file mode 100644 index 8e91abd9353..00000000000 --- a/core/ledger/statemgmt/trie/test.yaml +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/ledger/statemgmt/trie/testdb diff --git a/core/ledger/statemgmt/trie/trie_db_helper.go b/core/ledger/statemgmt/trie/trie_db_helper.go deleted file mode 100644 index 9564ad45a2e..00000000000 --- a/core/ledger/statemgmt/trie/trie_db_helper.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import "github.com/hyperledger/fabric/core/db" - -func fetchTrieNodeFromDB(key *trieKey) (*trieNode, error) { - stateTrieLogger.Debugf("Enter fetchTrieNodeFromDB() for trieKey [%s]", key) - openchainDB := db.GetDBHandle() - trieNodeBytes, err := openchainDB.GetFromStateCF(key.getEncodedBytes()) - if err != nil { - stateTrieLogger.Errorf("Error in retrieving trie node from DB for triekey [%s]. Error:%s", key, err) - return nil, err - } - - if trieNodeBytes == nil { - return nil, nil - } - - trieNode, err := unmarshalTrieNode(key, trieNodeBytes) - if err != nil { - stateTrieLogger.Errorf("Error in unmarshalling trie node for triekey [%s]. Error:%s", key, err) - return nil, err - } - stateTrieLogger.Debugf("Exit fetchTrieNodeFromDB() for trieKey [%s]", key) - return trieNode, nil -} diff --git a/core/ledger/statemgmt/trie/trie_delta.go b/core/ledger/statemgmt/trie/trie_delta.go deleted file mode 100644 index fe78a1fdec9..00000000000 --- a/core/ledger/statemgmt/trie/trie_delta.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trie - -import ( - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -type levelDeltaMap map[string]*trieNode - -type trieDelta struct { - lowestLevel int - deltaMap map[int]levelDeltaMap -} - -func newLevelDeltaMap() levelDeltaMap { - return levelDeltaMap(make(map[string]*trieNode)) -} - -func newTrieDelta(stateDelta *statemgmt.StateDelta) *trieDelta { - trieDelta := &trieDelta{0, make(map[int]levelDeltaMap)} - chaincodes := stateDelta.GetUpdatedChaincodeIds(false) - for _, chaincodeID := range chaincodes { - updates := stateDelta.GetUpdates(chaincodeID) - for key, updatedvalue := range updates { - if updatedvalue.IsDeleted() { - trieDelta.delete(chaincodeID, key) - } else { - if stateDelta.RollBackwards { - trieDelta.set(chaincodeID, key, updatedvalue.GetPreviousValue()) - } else { - trieDelta.set(chaincodeID, key, updatedvalue.GetValue()) - } - } - } - } - return trieDelta -} - -func (trieDelta *trieDelta) getLowestLevel() int { - return trieDelta.lowestLevel -} - -func (trieDelta *trieDelta) getChangesAtLevel(level int) []*trieNode { - levelDelta := trieDelta.deltaMap[level] - changedNodes := make([]*trieNode, len(levelDelta)) - for _, v := range levelDelta { - changedNodes = append(changedNodes, v) - } - return changedNodes -} - -func (trieDelta *trieDelta) getParentOf(trieNode *trieNode) *trieNode { - parentLevel := trieNode.getParentLevel() - parentTrieKey := trieNode.getParentTrieKey() - levelDeltaMap := trieDelta.deltaMap[parentLevel] - if levelDeltaMap == nil { - return nil - } - return levelDeltaMap[parentTrieKey.getEncodedBytesAsStr()] -} - -func (trieDelta *trieDelta) addTrieNode(trieNode *trieNode) { - level := trieNode.getLevel() - levelDeltaMap := trieDelta.deltaMap[level] - if levelDeltaMap == nil { - levelDeltaMap = newLevelDeltaMap() - trieDelta.deltaMap[level] = levelDeltaMap - } - levelDeltaMap[trieNode.trieKey.getEncodedBytesAsStr()] = trieNode - if level > trieDelta.lowestLevel { - trieDelta.lowestLevel = level - } -} - -func (trieDelta *trieDelta) getTrieRootNode() *trieNode { - levelZeroMap := trieDelta.deltaMap[0] - if levelZeroMap == nil { - return nil - } - return levelZeroMap[rootTrieKeyStr] -} - -func (trieDelta *trieDelta) set(chaincodeId string, key string, value []byte) { - trieNode := newTrieNode(newTrieKey(chaincodeId, key), value, true) - trieDelta.addTrieNode(trieNode) -} - -func (trieDelta *trieDelta) delete(chaincodeId string, key string) { - trieDelta.set(chaincodeId, key, nil) -} diff --git a/core/ledger/statemgmt/trie/trie_key.go b/core/ledger/statemgmt/trie/trie_key.go deleted file mode 100644 index a57015a6b61..00000000000 --- a/core/ledger/statemgmt/trie/trie_key.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package trie - -import ( - "bytes" - "fmt" - - "github.com/hyperledger/fabric/core/ledger/statemgmt" -) - -type trieKeyEncoder interface { - newTrieKey(originalBytes []byte) trieKeyInterface - getMaxTrieWidth() int - decodeTrieKeyBytes(encodedBytes []byte) (originalBytes []byte) -} - -type trieKeyInterface interface { - getLevel() int - getParentTrieKey() trieKeyInterface - getIndexInParent() int - getEncodedBytes() []byte -} - -var trieKeyEncoderImpl trieKeyEncoder = newByteTrieKeyEncoder() -var rootTrieKeyBytes = []byte{} -var rootTrieKeyStr = string(rootTrieKeyBytes) -var rootTrieKey = newTrieKeyFromCompositeKey(rootTrieKeyBytes) - -type trieKey struct { - trieKeyImpl trieKeyInterface -} - -func newTrieKey(chaincodeID string, key string) *trieKey { - compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key) - return newTrieKeyFromCompositeKey(compositeKey) -} - -func newTrieKeyFromCompositeKey(compositeKey []byte) *trieKey { - return &trieKey{trieKeyEncoderImpl.newTrieKey(compositeKey)} -} - -func decodeTrieKeyBytes(encodedBytes []byte) []byte { - return trieKeyEncoderImpl.decodeTrieKeyBytes(encodedBytes) -} - -func (key *trieKey) getEncodedBytes() []byte { - return key.trieKeyImpl.getEncodedBytes() -} - -func (key *trieKey) getLevel() int { - return key.trieKeyImpl.getLevel() -} - -func (key *trieKey) getIndexInParent() int { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return key.trieKeyImpl.getIndexInParent() -} - -func (key *trieKey) getParentTrieKey() *trieKey { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return &trieKey{key.trieKeyImpl.getParentTrieKey()} -} - -func (key *trieKey) getEncodedBytesAsStr() string { - return string(key.trieKeyImpl.getEncodedBytes()) -} - -func (key *trieKey) isRootKey() bool { - return len(key.getEncodedBytes()) == 0 -} - -func (key *trieKey) getParentLevel() int { - if key.isRootKey() { - panic(fmt.Errorf("Parent for Trie root shoould not be asked for")) - } - return key.getLevel() - 1 -} - -func (key *trieKey) assertIsChildOf(parentTrieKey *trieKey) { - if !bytes.Equal(key.getParentTrieKey().getEncodedBytes(), parentTrieKey.getEncodedBytes()) { - panic(fmt.Errorf("trie key [%s] is not a child of trie key [%s]", key, parentTrieKey)) - } -} diff --git a/core/ledger/statemgmt/trie/trie_node.go b/core/ledger/statemgmt/trie/trie_node.go deleted file mode 100644 index 268fba4657e..00000000000 --- a/core/ledger/statemgmt/trie/trie_node.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package trie - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/util" -) - -type trieNode struct { - trieKey *trieKey - value []byte - childrenCryptoHashes map[int][]byte - - valueUpdated bool - childrenCryptoHashesUpdated map[int]bool - markedForDeletion bool -} - -func newTrieNode(key *trieKey, value []byte, updated bool) *trieNode { - return &trieNode{ - trieKey: key, - value: value, - childrenCryptoHashes: make(map[int][]byte), - - valueUpdated: updated, - childrenCryptoHashesUpdated: make(map[int]bool), - } -} - -func (trieNode *trieNode) getLevel() int { - return trieNode.trieKey.getLevel() -} - -func (trieNode *trieNode) isRootNode() bool { - return trieNode.trieKey.isRootKey() -} - -func (trieNode *trieNode) setChildCryptoHash(index int, childCryptoHash []byte) { - if index >= trieKeyEncoderImpl.getMaxTrieWidth() { - panic(fmt.Errorf("Index for child crypto-hash cannot be greater than [%d]. Tried to access index value [%d]", trieKeyEncoderImpl.getMaxTrieWidth(), index)) - } - if childCryptoHash != nil { - trieNode.childrenCryptoHashes[index] = childCryptoHash - } - trieNode.childrenCryptoHashesUpdated[index] = true -} - -func (trieNode *trieNode) getParentTrieKey() *trieKey { - return trieNode.trieKey.getParentTrieKey() -} - -func (trieNode *trieNode) getParentLevel() int { - return trieNode.trieKey.getParentLevel() -} - -func (trieNode *trieNode) getIndexInParent() int { - return trieNode.trieKey.getIndexInParent() -} - -func (trieNode *trieNode) mergeMissingAttributesFrom(dbTrieNode *trieNode) { - stateTrieLogger.Debugf("Enter mergeMissingAttributesFrom() baseNode=[%s], mergeNode=[%s]", trieNode, dbTrieNode) - if !trieNode.valueUpdated { - trieNode.value = dbTrieNode.value - } - for k, v := range dbTrieNode.childrenCryptoHashes { - if !trieNode.childrenCryptoHashesUpdated[k] { - trieNode.childrenCryptoHashes[k] = v - } - } - stateTrieLogger.Debugf("Exit mergeMissingAttributesFrom() mergedNode=[%s]", trieNode) -} - -func (trieNode *trieNode) computeCryptoHash() []byte { - stateTrieLogger.Debugf("Enter computeCryptoHash() for trieNode [%s]", trieNode) - var cryptoHashContent []byte - if trieNode.containsValue() { - stateTrieLogger.Debugf("Adding value to hash computation for trieNode [%s]", trieNode) - key := trieNode.trieKey.getEncodedBytes() - cryptoHashContent = append(cryptoHashContent, proto.EncodeVarint(uint64(len(key)))...) - cryptoHashContent = append(cryptoHashContent, key...) - cryptoHashContent = append(cryptoHashContent, trieNode.value...) - } - - sortedChildrenIndexes := trieNode.getSortedChildrenIndex() - for _, index := range sortedChildrenIndexes { - childCryptoHash := trieNode.childrenCryptoHashes[index] - stateTrieLogger.Debugf("Adding hash [%#v] for child number [%d] to hash computation for trieNode [%s]", childCryptoHash, index, trieNode) - cryptoHashContent = append(cryptoHashContent, childCryptoHash...) - } - - if cryptoHashContent == nil { - // node has no associated value and no associated children. - stateTrieLogger.Debugf("Returning nil as hash for trieNode = [%s]. Also, marking this key for deletion.", trieNode) - trieNode.markedForDeletion = true - return nil - } - - if !trieNode.containsValue() && trieNode.getNumChildren() == 1 { - // node has no associated value and has a single child. 
Propagate the child hash up - stateTrieLogger.Debugf("Returning hash as of a single child for trieKey = [%s]", trieNode.trieKey) - return cryptoHashContent - } - - stateTrieLogger.Debugf("Recomputing hash for trieKey = [%s]", trieNode) - return util.ComputeCryptoHash(cryptoHashContent) -} - -func (trieNode *trieNode) containsValue() bool { - if trieNode.isRootNode() { - return false - } - return trieNode.value != nil -} - -func (trieNode *trieNode) marshal() ([]byte, error) { - buffer := proto.NewBuffer([]byte{}) - - // write value marker explicitly because rocksdb apis convertes a nil into an empty array and protobuf does it other-way around - var valueMarker uint64 = 0 // ignore golint warning. Dropping '= 0' makes assignment less clear - if trieNode.value != nil { - valueMarker = 1 - } - err := buffer.EncodeVarint(valueMarker) - if err != nil { - return nil, err - } - if trieNode.value != nil { - // write value - err = buffer.EncodeRawBytes(trieNode.value) - if err != nil { - return nil, err - } - } - //write number of crypto-hashes - numCryptoHashes := trieNode.getNumChildren() - err = buffer.EncodeVarint(uint64(numCryptoHashes)) - if err != nil { - return nil, err - } - - if numCryptoHashes == 0 { - return buffer.Bytes(), nil - } - - for i, cryptoHash := range trieNode.childrenCryptoHashes { - //write crypto-hash Index - err = buffer.EncodeVarint(uint64(i)) - if err != nil { - return nil, err - } - // write crypto-hash - err = buffer.EncodeRawBytes(cryptoHash) - if err != nil { - return nil, err - } - } - serializedBytes := buffer.Bytes() - stateTrieLogger.Debugf("Marshalled trieNode [%s]. Serialized bytes size = %d", trieNode.trieKey, len(serializedBytes)) - return serializedBytes, nil -} - -func unmarshalTrieNode(key *trieKey, serializedContent []byte) (*trieNode, error) { - stateTrieLogger.Debugf("key = [%s], len(serializedContent) = %d", key, len(serializedContent)) - trieNode := newTrieNode(key, nil, false) - buffer := proto.NewBuffer(serializedContent) - trieNode.value = unmarshalTrieNodeValueFromBuffer(buffer) - - numCryptoHashes, err := buffer.DecodeVarint() - stateTrieLogger.Debugf("numCryptoHashes = [%d]", numCryptoHashes) - if err != nil { - return nil, err - } - for i := uint64(0); i < numCryptoHashes; i++ { - index, err := buffer.DecodeVarint() - if err != nil { - return nil, err - } - cryptoHash, err := buffer.DecodeRawBytes(false) - if err != nil { - return nil, err - } - trieNode.childrenCryptoHashes[int(index)] = cryptoHash - } - stateTrieLogger.Debugf("unmarshalled trieNode = [%s]", trieNode) - return trieNode, nil -} - -func unmarshalTrieNodeValue(serializedContent []byte) []byte { - return unmarshalTrieNodeValueFromBuffer(proto.NewBuffer(serializedContent)) -} - -func unmarshalTrieNodeValueFromBuffer(buffer *proto.Buffer) []byte { - valueMarker, err := buffer.DecodeVarint() - if err != nil { - panic(fmt.Errorf("This error is not excpected: %s", err)) - } - if valueMarker == 0 { - return nil - } - value, err := buffer.DecodeRawBytes(false) - if err != nil { - panic(fmt.Errorf("This error is not excpected: %s", err)) - } - return value -} - -func (trieNode *trieNode) String() string { - return fmt.Sprintf("trieKey=[%s], value=[%#v], Num children hashes=[%#v]", - trieNode.trieKey, trieNode.value, trieNode.getNumChildren()) -} - -func (trieNode *trieNode) getNumChildren() int { - return len(trieNode.childrenCryptoHashes) -} - -func (trieNode *trieNode) getSortedChildrenIndex() []int { - keys := make([]int, trieNode.getNumChildren()) - i := 0 - for k := range 
trieNode.childrenCryptoHashes { - keys[i] = k - i++ - } - sort.Ints(keys) - return keys -} diff --git a/core/ledger/test.yaml b/core/ledger/test.yaml deleted file mode 100644 index 1e53fdd7225..00000000000 --- a/core/ledger/test.yaml +++ /dev/null @@ -1,17 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/ledger_test - -ledger: - - state: - - # Control the number state deltas that are maintained. This takes additional - # disk space, but allow the state to be rolled backwards and forwards - # without the need to replay transactions. - deltaHistorySize: 500 diff --git a/core/ledger/test/test.yaml b/core/ledger/test/test.yaml deleted file mode 100644 index 1e53fdd7225..00000000000 --- a/core/ledger/test/test.yaml +++ /dev/null @@ -1,17 +0,0 @@ -############################################################################### -# -# Peer section -# -############################################################################### -peer: - # Path on the file system where peer will store data - fileSystemPath: /var/hyperledger/test/ledger_test - -ledger: - - state: - - # Control the number state deltas that are maintained. This takes additional - # disk space, but allow the state to be rolled backwards and forwards - # without the need to replay transactions. - deltaHistorySize: 500 diff --git a/core/ledgernext/testutil/test_helper.go b/core/ledger/testutil/test_helper.go similarity index 100% rename from core/ledgernext/testutil/test_helper.go rename to core/ledger/testutil/test_helper.go diff --git a/core/ledger/testutil/test_util.go b/core/ledger/testutil/test_util.go index a9379312a71..04f1ae13c14 100644 --- a/core/ledger/testutil/test_util.go +++ b/core/ledger/testutil/test_util.go @@ -33,11 +33,13 @@ import ( "github.com/spf13/viper" ) +// TestRandomNumberGenerator is a random number generator for testing type TestRandomNumberGenerator struct { rand *mathRand.Rand maxNumber int } +// NewTestRandomNumberGenerator constructs a new `TestRandomNumberGenerator` func NewTestRandomNumberGenerator(maxNumber int) *TestRandomNumberGenerator { return &TestRandomNumberGenerator{ mathRand.New(mathRand.NewSource(time.Now().UnixNano())), @@ -45,10 +47,12 @@ func NewTestRandomNumberGenerator(maxNumber int) *TestRandomNumberGenerator { } } +// Next generates the next random number func (randNumGenerator *TestRandomNumberGenerator) Next() int { return randNumGenerator.rand.Intn(randNumGenerator.maxNumber) } +// SetupTestConfig sets up configurations for testing func SetupTestConfig() { viper.AddConfigPath(".") viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) @@ -68,10 +72,12 @@ func SetupTestConfig() { logging.SetFormatter(formatter) } +// SetLogLevel sets up log level func SetLogLevel(level logging.Level, module string) { logging.SetLevel(level, module) } +// ParseTestParams parses test params func ParseTestParams() []string { testParams := flag.String("testParams", "", "Test specific parameters") flag.Parse() @@ -83,18 +89,21 @@ func ParseTestParams() []string { return paramsArray } +// AssertNil verifies that the value is nil func AssertNil(t testing.TB, value interface{}) { if !isNil(value) { t.Fatalf("Value not nil. 
value=[%#v]\n %s", value, getCallerInfo()) } } +// AssertNotNil verifies that the value is not nil func AssertNotNil(t testing.TB, value interface{}) { if isNil(value) { t.Fatalf("Values is nil. %s", getCallerInfo()) } } +// AssertSame verifies that the two values are the same func AssertSame(t testing.TB, actual interface{}, expected interface{}) { t.Logf("%s: AssertSame [%#v] and [%#v]", getCallerInfo(), actual, expected) if actual != expected { @@ -102,6 +111,7 @@ func AssertSame(t testing.TB, actual interface{}, expected interface{}) { } } +// AssertEquals verifies that the two values are equal func AssertEquals(t testing.TB, actual interface{}, expected interface{}) { t.Logf("%s: AssertEquals [%#v] and [%#v]", getCallerInfo(), actual, expected) if expected == nil && isNil(actual) { @@ -112,24 +122,28 @@ func AssertEquals(t testing.TB, actual interface{}, expected interface{}) { } } +// AssertNotEquals verifies that the two values are not equal func AssertNotEquals(t testing.TB, actual interface{}, expected interface{}) { if reflect.DeepEqual(actual, expected) { t.Fatalf("Values are not supposed to be equal. Actual=[%#v], Expected=[%#v]\n %s", actual, expected, getCallerInfo()) } } +// AssertError verifies that the err is not nil func AssertError(t testing.TB, err error, message string) { if err == nil { t.Fatalf("%s\n %s", message, getCallerInfo()) } } +// AssertNoError verifies that the err is nil func AssertNoError(t testing.TB, err error, message string) { if err != nil { t.Fatalf("%s - Error: %s\n %s", message, err, getCallerInfo()) } } +// AssertContains verifies that the slice contains the value func AssertContains(t testing.TB, slice interface{}, value interface{}) { if reflect.TypeOf(slice).Kind() != reflect.Slice && reflect.TypeOf(slice).Kind() != reflect.Array { t.Fatalf("Type of argument 'slice' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(slice), getCallerInfo()) @@ -140,6 +154,7 @@ func AssertContains(t testing.TB, slice interface{}, value interface{}) { } } +// AssertContainsAll verifies that sliceActual is a superset of sliceExpected func AssertContainsAll(t testing.TB, sliceActual interface{}, sliceExpected interface{}) { if reflect.TypeOf(sliceActual).Kind() != reflect.Slice && reflect.TypeOf(sliceActual).Kind() != reflect.Array { t.Fatalf("Type of argument 'sliceActual' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(sliceActual), getCallerInfo()) @@ -158,6 +173,7 @@ func AssertContainsAll(t testing.TB, sliceActual interface{}, sliceExpected inte } } +// AssertPanic verifies that a panic is raised during a test func AssertPanic(t testing.TB, msg string) { x := recover() if x == nil { @@ -167,10 +183,12 @@ func AssertPanic(t testing.TB, msg string) { } } +// ComputeCryptoHash computes crypto hash for testing func ComputeCryptoHash(content ...[]byte) []byte { return util.ComputeCryptoHash(AppendAll(content...)) } +// AppendAll combines the bytes from different []byte into one []byte func AppendAll(content ...[]byte) []byte { combinedContent := []byte{} for _, b := range content { @@ -179,10 +197,12 @@ func AppendAll(content ...[]byte) []byte { return combinedContent } +// GenerateID generates a uuid func GenerateID(t *testing.T) string { return util.GenerateUUID() } +// ConstructRandomBytes constructs random bytes of given size func ConstructRandomBytes(t testing.TB, size int) []byte { value := make([]byte, size) _, err := rand.Read(value) diff --git a/core/ledgernext/testutil/test_util_test.go 
b/core/ledger/testutil/test_util_test.go similarity index 100% rename from core/ledgernext/testutil/test_util_test.go rename to core/ledger/testutil/test_util_test.go diff --git a/core/ledgernext/util/db/db.go b/core/ledger/util/db/db.go similarity index 98% rename from core/ledgernext/util/db/db.go rename to core/ledger/util/db/db.go index 16d00ae8a9b..d41c1bc854f 100644 --- a/core/ledgernext/util/db/db.go +++ b/core/ledger/util/db/db.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "github.com/hyperledger/fabric/core/ledgernext/util" + "github.com/hyperledger/fabric/core/ledger/util" "github.com/op/go-logging" "github.com/tecbot/gorocksdb" ) diff --git a/core/ledgernext/util/db/db_test.go b/core/ledger/util/db/db_test.go similarity index 96% rename from core/ledgernext/util/db/db_test.go rename to core/ledger/util/db/db_test.go index de04257773d..fc977618ef6 100644 --- a/core/ledgernext/util/db/db_test.go +++ b/core/ledger/util/db/db_test.go @@ -19,7 +19,7 @@ package db import ( "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) func TestDBBasicWriteAndReads(t *testing.T) { diff --git a/core/ledgernext/util/ioutil.go b/core/ledger/util/ioutil.go similarity index 100% rename from core/ledgernext/util/ioutil.go rename to core/ledger/util/ioutil.go diff --git a/core/ledgernext/util/ioutil_test.go b/core/ledger/util/ioutil_test.go similarity index 98% rename from core/ledgernext/util/ioutil_test.go rename to core/ledger/util/ioutil_test.go index fee37de512d..4d9510f117a 100644 --- a/core/ledgernext/util/ioutil_test.go +++ b/core/ledger/util/ioutil_test.go @@ -21,7 +21,7 @@ import ( "os" "testing" - "github.com/hyperledger/fabric/core/ledgernext/testutil" + "github.com/hyperledger/fabric/core/ledger/testutil" ) var DbPathTest = "/tmp/v2/test/util" diff --git a/core/ledgernext/util/util_test.go b/core/ledger/util/util_test.go similarity index 100% rename from core/ledgernext/util/util_test.go rename to core/ledger/util/util_test.go diff --git a/core/ledgernext/testutil/test_util.go b/core/ledgernext/testutil/test_util.go deleted file mode 100644 index 04f1ae13c14..00000000000 --- a/core/ledgernext/testutil/test_util.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package testutil - -import ( - "crypto/rand" - "flag" - "fmt" - mathRand "math/rand" - "reflect" - "regexp" - "runtime" - "strings" - "testing" - "time" - - "github.com/hyperledger/fabric/core/util" - "github.com/op/go-logging" - "github.com/spf13/viper" -) - -// TestRandomNumberGenerator a random number generator for testing -type TestRandomNumberGenerator struct { - rand *mathRand.Rand - maxNumber int -} - -// NewTestRandomNumberGenerator constructs a new `TestRandomNumberGenerator` -func NewTestRandomNumberGenerator(maxNumber int) *TestRandomNumberGenerator { - return &TestRandomNumberGenerator{ - mathRand.New(mathRand.NewSource(time.Now().UnixNano())), - maxNumber, - } -} - -// Next generates next random number -func (randNumGenerator *TestRandomNumberGenerator) Next() int { - return randNumGenerator.rand.Intn(randNumGenerator.maxNumber) -} - -// SetupTestConfig sets up configurations for tetsing -func SetupTestConfig() { - viper.AddConfigPath(".") - viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - viper.AutomaticEnv() - viper.SetDefault("peer.ledger.test.loadYAML", true) - loadYAML := viper.GetBool("peer.ledger.test.loadYAML") - if loadYAML { - viper.SetConfigName("test") - err := viper.ReadInConfig() - if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) - } - } - var formatter = logging.MustStringFormatter( - `%{color}%{time:15:04:05.000} [%{module}] %{shortfunc} [%{shortfile}] -> %{level:.4s} %{id:03x}%{color:reset} %{message}`, - ) - logging.SetFormatter(formatter) -} - -// SetLogLevel sets up log level -func SetLogLevel(level logging.Level, module string) { - logging.SetLevel(level, module) -} - -// ParseTestParams parses tests params -func ParseTestParams() []string { - testParams := flag.String("testParams", "", "Test specific parameters") - flag.Parse() - regex, err := regexp.Compile(",(\\s+)?") - if err != nil { - panic(fmt.Errorf("err = %s\n", err)) - } - paramsArray := regex.Split(*testParams, -1) - return paramsArray -} - -// AssertNil varifies that the value is nil -func AssertNil(t testing.TB, value interface{}) { - if !isNil(value) { - t.Fatalf("Value not nil. value=[%#v]\n %s", value, getCallerInfo()) - } -} - -// AssertNotNil varifies that the value is not nil -func AssertNotNil(t testing.TB, value interface{}) { - if isNil(value) { - t.Fatalf("Values is nil. %s", getCallerInfo()) - } -} - -// AssertSame varifies that the two values are same -func AssertSame(t testing.TB, actual interface{}, expected interface{}) { - t.Logf("%s: AssertSame [%#v] and [%#v]", getCallerInfo(), actual, expected) - if actual != expected { - t.Fatalf("Values actual=[%#v] and expected=[%#v] do not point to same object. %s", actual, expected, getCallerInfo()) - } -} - -// AssertEquals varifies that the two values are equal -func AssertEquals(t testing.TB, actual interface{}, expected interface{}) { - t.Logf("%s: AssertEquals [%#v] and [%#v]", getCallerInfo(), actual, expected) - if expected == nil && isNil(actual) { - return - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("Values are not equal.\n Actual=[%#v], \n Expected=[%#v]\n %s", actual, expected, getCallerInfo()) - } -} - -// AssertNotEquals varifies that the two values are not equal -func AssertNotEquals(t testing.TB, actual interface{}, expected interface{}) { - if reflect.DeepEqual(actual, expected) { - t.Fatalf("Values are not supposed to be equal. 
Actual=[%#v], Expected=[%#v]\n %s", actual, expected, getCallerInfo()) - } -} - -// AssertError varifies that the err is not nil -func AssertError(t testing.TB, err error, message string) { - if err == nil { - t.Fatalf("%s\n %s", message, getCallerInfo()) - } -} - -// AssertNoError varifies that the err is nil -func AssertNoError(t testing.TB, err error, message string) { - if err != nil { - t.Fatalf("%s - Error: %s\n %s", message, err, getCallerInfo()) - } -} - -// AssertContains varifies that the slice contains the value -func AssertContains(t testing.TB, slice interface{}, value interface{}) { - if reflect.TypeOf(slice).Kind() != reflect.Slice && reflect.TypeOf(slice).Kind() != reflect.Array { - t.Fatalf("Type of argument 'slice' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(slice), getCallerInfo()) - } - - if !contains(slice, value) { - t.Fatalf("Expected value [%s] not found in slice %s\n %s", value, slice, getCallerInfo()) - } -} - -// AssertContainsAll varifies that sliceActual is a superset of sliceExpected -func AssertContainsAll(t testing.TB, sliceActual interface{}, sliceExpected interface{}) { - if reflect.TypeOf(sliceActual).Kind() != reflect.Slice && reflect.TypeOf(sliceActual).Kind() != reflect.Array { - t.Fatalf("Type of argument 'sliceActual' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(sliceActual), getCallerInfo()) - } - - if reflect.TypeOf(sliceExpected).Kind() != reflect.Slice && reflect.TypeOf(sliceExpected).Kind() != reflect.Array { - t.Fatalf("Type of argument 'sliceExpected' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(sliceExpected), getCallerInfo()) - } - - array := reflect.ValueOf(sliceExpected) - for i := 0; i < array.Len(); i++ { - element := array.Index(i).Interface() - if !contains(sliceActual, element) { - t.Fatalf("Expected value [%s] not found in slice %s\n %s", element, sliceActual, getCallerInfo()) - } - } -} - -// AssertPanic varifies that a panic is raised during a test -func AssertPanic(t testing.TB, msg string) { - x := recover() - if x == nil { - t.Fatal(msg) - } else { - t.Logf("A panic was caught successfully. Actual msg = %s", x) - } -} - -// ComputeCryptoHash computes crypto hash for testing -func ComputeCryptoHash(content ...[]byte) []byte { - return util.ComputeCryptoHash(AppendAll(content...)) -} - -// AppendAll combines the bytes from different []byte into one []byte -func AppendAll(content ...[]byte) []byte { - combinedContent := []byte{} - for _, b := range content { - combinedContent = append(combinedContent, b...) 
- } - return combinedContent -} - -// GenerateID generates a uuid -func GenerateID(t *testing.T) string { - return util.GenerateUUID() -} - -// ConstructRandomBytes constructs random bytes of given size -func ConstructRandomBytes(t testing.TB, size int) []byte { - value := make([]byte, size) - _, err := rand.Read(value) - if err != nil { - t.Fatalf("Error while generating random bytes: %s", err) - } - return value -} - -func contains(slice interface{}, value interface{}) bool { - array := reflect.ValueOf(slice) - for i := 0; i < array.Len(); i++ { - element := array.Index(i).Interface() - if value == element || reflect.DeepEqual(element, value) { - return true - } - } - return false -} - -func isNil(in interface{}) bool { - return in == nil || reflect.ValueOf(in).IsNil() || (reflect.TypeOf(in).Kind() == reflect.Slice && reflect.ValueOf(in).Len() == 0) -} - -func getCallerInfo() string { - _, file, line, ok := runtime.Caller(2) - if !ok { - return "Could not retrieve caller's info" - } - return fmt.Sprintf("CallerInfo = [%s:%d]", file, line) -} diff --git a/core/ledgernext/util/util.go b/core/ledgernext/util/util.go deleted file mode 100644 index fdc768a7e2f..00000000000 --- a/core/ledgernext/util/util.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "encoding/binary" - "fmt" - - "github.com/golang/protobuf/proto" -) - -// EncodeOrderPreservingVarUint64 returns a byte-representation for a uint64 number such that -// all zero-bits starting bytes are trimmed in order to reduce the length of the array -// For preserving the order in a default bytes-comparison, first byte contains the number of remaining bytes. -// The presence of first byte also allows to use the returned bytes as part of other larger byte array such as a -// composite-key representation in db -func EncodeOrderPreservingVarUint64(number uint64) []byte { - bytes := make([]byte, 8) - binary.BigEndian.PutUint64(bytes, number) - startingIndex := 0 - size := 0 - for i, b := range bytes { - if b != 0x00 { - startingIndex = i - size = 8 - i - break - } - } - sizeBytes := proto.EncodeVarint(uint64(size)) - if len(sizeBytes) > 1 { - panic(fmt.Errorf("[]sizeBytes should not be more than one byte because the max number it needs to hold is 8. size=%d", size)) - } - encodedBytes := make([]byte, size+1) - encodedBytes[0] = sizeBytes[0] - copy(encodedBytes[1:], bytes[startingIndex:]) - return encodedBytes -} - -// DecodeOrderPreservingVarUint64 decodes the number from the bytes obtained from method 'EncodeOrderPreservingVarUint64'. 
-// Also, returns the number of bytes that are consumed in the process -func DecodeOrderPreservingVarUint64(bytes []byte) (uint64, int) { - s, _ := proto.DecodeVarint(bytes) - size := int(s) - decodedBytes := make([]byte, 8) - copy(decodedBytes[8-size:], bytes[1:size+1]) - numBytesConsumed := size + 1 - return binary.BigEndian.Uint64(decodedBytes), numBytesConsumed -} diff --git a/core/peer/peer.go b/core/peer/peer.go index 27b0e4196b5..0d6c1b02c7c 100644 --- a/core/peer/peer.go +++ b/core/peer/peer.go @@ -37,9 +37,6 @@ import ( "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/db" "github.com/hyperledger/fabric/core/discovery" - "github.com/hyperledger/fabric/core/ledger" - "github.com/hyperledger/fabric/core/ledger/statemgmt" - "github.com/hyperledger/fabric/core/ledger/statemgmt/state" "github.com/hyperledger/fabric/core/util" pb "github.com/hyperledger/fabric/protos" ) @@ -50,34 +47,6 @@ type Peer interface { NewOpenchainDiscoveryHello() (*pb.Message, error) } -// BlockChainAccessor interface for retreiving blocks by block number -type BlockChainAccessor interface { - GetBlockByNumber(blockNumber uint64) (*pb.Block, error) - GetBlockchainSize() uint64 - GetCurrentStateHash() (stateHash []byte, err error) -} - -// BlockChainModifier interface for applying changes to the block chain -type BlockChainModifier interface { - ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error - RollbackStateDelta(id interface{}) error - CommitStateDelta(id interface{}) error - EmptyState() error - PutBlock(blockNumber uint64, block *pb.Block) error -} - -// BlockChainUtil interface for interrogating the block chain -type BlockChainUtil interface { - HashBlock(block *pb.Block) ([]byte, error) - VerifyBlockchain(start, finish uint64) (uint64, error) -} - -// StateAccessor interface for retreiving blocks by block number -type StateAccessor interface { - GetStateSnapshot() (*state.StateSnapshot, error) - GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error) -} - // MessageHandler standard interface for handling Openchain messages. 
type MessageHandler interface { HandleMessage(msg *pb.Message) error @@ -90,10 +59,6 @@ type MessageHandler interface { type MessageHandlerCoordinator interface { Peer SecurityAccessor - BlockChainAccessor - BlockChainModifier - BlockChainUtil - StateAccessor RegisterHandler(messageHandler MessageHandler) error DeregisterHandler(messageHandler MessageHandler) error Broadcast(*pb.Message, pb.PeerEndpoint_Type) []error @@ -147,11 +112,6 @@ func NewPeerClientConnectionWithAddress(peerAddress string) (*grpc.ClientConn, e return comm.NewClientConnectionWithAddress(peerAddress, true, false, nil) } -type ledgerWrapper struct { - sync.RWMutex - ledger *ledger.Ledger -} - type handlerMap struct { sync.RWMutex m map[pb.PeerID]MessageHandler @@ -167,7 +127,6 @@ type EngineFactory func(MessageHandlerCoordinator) (Engine, error) type Impl struct { handlerFactory HandlerFactory handlerMap *handlerMap - ledgerWrapper *ledgerWrapper secHelper crypto.Peer engine Engine isValidator bool @@ -209,9 +168,6 @@ func NewPeerWithHandler(secHelperFunc func() crypto.Peer, handlerFact HandlerFac } } - //PDMP - no more old ledger - peer.ledgerWrapper = &ledgerWrapper{ledger: nil} - peer.chatWithSomePeers(peerNodes) return peer, nil } @@ -233,10 +189,6 @@ func NewPeerWithEngine(secHelperFunc func() crypto.Peer, engFactory EngineFactor } } - //PDMP - no more old ledger - peer.ledgerWrapper = &ledgerWrapper{ledger: nil} - - //PDMP - no more consensus factory if engFactory != nil { peer.engine, err = engFactory(peer) if err != nil { @@ -624,94 +576,6 @@ func (p *Impl) newHelloMessage() (*pb.HelloMessage, error) { return &pb.HelloMessage{PeerEndpoint: endpoint}, nil } -// GetBlockByNumber return a block by block number -func (p *Impl) GetBlockByNumber(blockNumber uint64) (*pb.Block, error) { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.GetBlockByNumber(blockNumber) -} - -// GetBlockchainSize returns the height/length of the blockchain -func (p *Impl) GetBlockchainSize() uint64 { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.GetBlockchainSize() -} - -// GetCurrentStateHash returns the current non-committed hash of the in memory state -func (p *Impl) GetCurrentStateHash() (stateHash []byte, err error) { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.GetTempStateHash() -} - -// HashBlock returns the hash of the included block, useful for mocking -func (p *Impl) HashBlock(block *pb.Block) ([]byte, error) { - return block.GetHash() -} - -// VerifyBlockchain checks the integrity of the blockchain between indices start and finish, -// returning the first block who's PreviousBlockHash field does not match the hash of the previous block -func (p *Impl) VerifyBlockchain(start, finish uint64) (uint64, error) { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.VerifyChain(start, finish) -} - -// ApplyStateDelta applies a state delta to the current state -// The result of this function can be retrieved using GetCurrentStateDelta -// To commit the result, call CommitStateDelta, or to roll it back -// call RollbackStateDelta -func (p *Impl) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error { - p.ledgerWrapper.Lock() - defer p.ledgerWrapper.Unlock() - return p.ledgerWrapper.ledger.ApplyStateDelta(id, delta) -} - -// CommitStateDelta makes the result of ApplyStateDelta permanent -// and releases the resources necessary to rollback the delta -func 
(p *Impl) CommitStateDelta(id interface{}) error { - p.ledgerWrapper.Lock() - defer p.ledgerWrapper.Unlock() - return p.ledgerWrapper.ledger.CommitStateDelta(id) -} - -// RollbackStateDelta undoes the results of ApplyStateDelta to revert -// the current state back to the state before ApplyStateDelta was invoked -func (p *Impl) RollbackStateDelta(id interface{}) error { - p.ledgerWrapper.Lock() - defer p.ledgerWrapper.Unlock() - return p.ledgerWrapper.ledger.RollbackStateDelta(id) -} - -// EmptyState completely empties the state and prepares it to restore a snapshot -func (p *Impl) EmptyState() error { - p.ledgerWrapper.Lock() - defer p.ledgerWrapper.Unlock() - return p.ledgerWrapper.ledger.DeleteALLStateKeysAndValues() -} - -// GetStateSnapshot return the state snapshot -func (p *Impl) GetStateSnapshot() (*state.StateSnapshot, error) { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.GetStateSnapshot() -} - -// GetStateDelta return the state delta for the requested block number -func (p *Impl) GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error) { - p.ledgerWrapper.RLock() - defer p.ledgerWrapper.RUnlock() - return p.ledgerWrapper.ledger.GetStateDelta(blockNumber) -} - -// PutBlock inserts a raw block into the blockchain at the specified index, nearly no error checking is performed -func (p *Impl) PutBlock(blockNumber uint64, block *pb.Block) error { - p.ledgerWrapper.Lock() - defer p.ledgerWrapper.Unlock() - return p.ledgerWrapper.ledger.PutRawBlock(block, blockNumber) -} - // NewOpenchainDiscoveryHello constructs a new HelloMessage for sending func (p *Impl) NewOpenchainDiscoveryHello() (*pb.Message, error) { helloMessage, err := p.newHelloMessage() diff --git a/core/rest/api.go b/core/rest/api.go index fbda451fe4d..e86697ee20d 100644 --- a/core/rest/api.go +++ b/core/rest/api.go @@ -22,11 +22,8 @@ import ( "golang.org/x/net/context" - "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/empty" - "github.com/hyperledger/fabric/core/ledger" pb "github.com/hyperledger/fabric/protos" - "github.com/spf13/viper" ) var ( @@ -43,22 +40,19 @@ type PeerInfo interface { // ServerOpenchain defines the Openchain server object, which holds the // Ledger data structure and the pointer to the peerServer. type ServerOpenchain struct { - ledger *ledger.Ledger peerInfo PeerInfo } // NewOpenchainServer creates a new instance of the ServerOpenchain. func NewOpenchainServer() (*ServerOpenchain, error) { - //PDMP - do not use old ledger...set it to nil and let it crash on access - s := &ServerOpenchain{ledger: nil} + s := &ServerOpenchain{} return s, nil } // NewOpenchainServerWithPeerInfo creates a new instance of the ServerOpenchain. func NewOpenchainServerWithPeerInfo(peerServer PeerInfo) (*ServerOpenchain, error) { - //PDMP - do not use old ledger...set it to nil and let it crash on access - s := &ServerOpenchain{ledger: nil, peerInfo: peerServer} + s := &ServerOpenchain{peerInfo: peerServer} return s, nil } @@ -66,90 +60,29 @@ func NewOpenchainServerWithPeerInfo(peerServer PeerInfo) (*ServerOpenchain, erro // GetBlockchainInfo returns information about the blockchain ledger such as // height, current block hash, and previous block hash. 
func (s *ServerOpenchain) GetBlockchainInfo(ctx context.Context, e *empty.Empty) (*pb.BlockchainInfo, error) { - blockchainInfo, err := s.ledger.GetBlockchainInfo() - if blockchainInfo.Height == 0 { - return nil, fmt.Errorf("No blocks in blockchain.") - } - return blockchainInfo, err + return nil, fmt.Errorf("GetBlockchainInfo not implemented") } // GetBlockByNumber returns the data contained within a specific block in the // blockchain. The genesis block is block zero. func (s *ServerOpenchain) GetBlockByNumber(ctx context.Context, num *pb.BlockNumber) (*pb.Block, error) { - block, err := s.ledger.GetBlockByNumber(num.Number) - if err != nil { - switch err { - case ledger.ErrOutOfBounds: - return nil, ErrNotFound - default: - return nil, fmt.Errorf("Error retrieving block from blockchain: %s", err) - } - } - - // Remove payload from deploy transactions. This is done to make rest api - // calls more lightweight as the payload for these types of transactions - // can be very large. If the payload is needed, the caller should fetch the - // individual transaction. - blockTransactions := block.GetTransactions() - for _, transaction := range blockTransactions { - if transaction.Type == pb.Transaction_CHAINCODE_DEPLOY { - deploymentSpec := &pb.ChaincodeDeploymentSpec{} - err := proto.Unmarshal(transaction.Payload, deploymentSpec) - if err != nil { - if !viper.GetBool("security.privacy") { - return nil, err - } - //if privacy is enabled, payload is encrypted and unmarshal will - //likely fail... given we were going to just set the CodePackage - //to nil anyway, just recover and continue - deploymentSpec = &pb.ChaincodeDeploymentSpec{} - } - deploymentSpec.CodePackage = nil - deploymentSpecBytes, err := proto.Marshal(deploymentSpec) - if err != nil { - return nil, err - } - transaction.Payload = deploymentSpecBytes - } - } - - return block, nil + return nil, fmt.Errorf("GetBlockByNumber not implemented") } // GetBlockCount returns the current number of blocks in the blockchain data // structure. func (s *ServerOpenchain) GetBlockCount(ctx context.Context, e *empty.Empty) (*pb.BlockCount, error) { - // Total number of blocks in the blockchain. - size := s.ledger.GetBlockchainSize() - - // Check the number of blocks in the blockchain. If the blockchain is empty, - // return error. There will always be at least one block in the blockchain, - // the genesis block. - if size > 0 { - count := &pb.BlockCount{Count: size} - return count, nil - } - - return nil, fmt.Errorf("No blocks in blockchain.") + return nil, fmt.Errorf("GetBlockCount not implemented") } // GetState returns the value for a particular chaincode ID and key func (s *ServerOpenchain) GetState(ctx context.Context, chaincodeID, key string) ([]byte, error) { - return s.ledger.GetState(chaincodeID, key, true) + return nil, fmt.Errorf("GetState not implemented") } // GetTransactionByID returns a transaction matching the specified ID func (s *ServerOpenchain) GetTransactionByID(ctx context.Context, txID string) (*pb.Transaction, error) { - transaction, err := s.ledger.GetTransactionByID(txID) - if err != nil { - switch err { - case ledger.ErrResourceNotFound: - return nil, ErrNotFound - default: - return nil, fmt.Errorf("Error retrieving transaction from blockchain: %s", err) - } - } - return transaction, nil + return nil, fmt.Errorf("GetTransactionByID not implemented") } // GetPeers returns a list of all peer nodes currently connected to the target peer. 
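For reference, the order-preserving integer encoding documented in the removed core/ledgernext/util/util.go above (big-endian bytes with leading zero bytes trimmed, prefixed by a one-byte length, so the result can be compared byte-wise and embedded in larger keys such as composite db keys) can be illustrated with a short, self-contained sketch. The helper name `encode`, the `main` driver, and the sample values below are illustrative only and are not part of this change:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/golang/protobuf/proto"
)

// encode restates the scheme of the removed EncodeOrderPreservingVarUint64:
// big-endian bytes with leading zero bytes trimmed, prefixed by a single
// length byte so that plain byte-wise comparison still orders the numbers.
func encode(number uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, number)
	startingIndex, size := 0, 0
	for i, v := range b {
		if v != 0x00 {
			startingIndex, size = i, 8-i
			break
		}
	}
	sizeBytes := proto.EncodeVarint(uint64(size)) // one byte, since size <= 8
	encoded := make([]byte, size+1)
	encoded[0] = sizeBytes[0]
	copy(encoded[1:], b[startingIndex:])
	return encoded
}

func main() {
	// 255 encodes to 01ff and 256 to 020100; the shorter encoding has the
	// smaller length prefix, so byte-wise comparison matches numeric order.
	fmt.Printf("%x\n", encode(255)) // 01ff
	fmt.Printf("%x\n", encode(256)) // 020100
}
```

The length prefix is what preserves ordering once the leading zero bytes are trimmed: a numerically smaller value can never sort after a larger one, because its encoding is either shorter (smaller first byte) or equal in length and compared big-endian.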
diff --git a/peer/node/node.go b/peer/node/node.go index 80fc15c4e84..5bfc08c8a3a 100755 --- a/peer/node/node.go +++ b/peer/node/node.go @@ -19,7 +19,7 @@ package node import ( "fmt" - "github.com/hyperledger/fabric/core/ledgernext/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger" "github.com/op/go-logging" "github.com/spf13/cobra"