diff --git a/Makefile b/Makefile index 3276d5acc8f..d639f01bf2f 100644 --- a/Makefile +++ b/Makefile @@ -95,7 +95,10 @@ include docker-env.mk all: native docker checks -checks: linter license unit-test behave +checks: linter license spelling unit-test behave + +spelling: buildenv + @scripts/check_spelling.sh license: buildenv @scripts/check_license.sh diff --git a/bccsp/pkcs11/fileks.go b/bccsp/pkcs11/fileks.go index 6e5d650bedc..72c003d4a04 100644 --- a/bccsp/pkcs11/fileks.go +++ b/bccsp/pkcs11/fileks.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package pkcs11 import ( @@ -31,7 +32,7 @@ import ( "github.com/hyperledger/fabric/bccsp/utils" ) -// fileBasedKeyStore is a folder-based KeyStore. +// FileBasedKeyStore is a folder-based KeyStore. // Each key is stored in a separated file whose name contains the key's SKI // and flags to identity the key's type. All the keys are stored in // a folder whose path is provided at initialization time. diff --git a/bccsp/pkcs11/impl_test.go b/bccsp/pkcs11/impl_test.go index 3aaab770ef8..bd8bd95d7f1 100644 --- a/bccsp/pkcs11/impl_test.go +++ b/bccsp/pkcs11/impl_test.go @@ -1468,7 +1468,7 @@ func TestRSAKeyGenEphemeral(t *testing.T) { t.Fatalf("Failed generating RSA corresponding public key [%s]", err) } if pk == nil { - t.Fatal("PK must be diffrent from nil") + t.Fatal("PK must be different from nil") } b, err := k.Bytes() diff --git a/bccsp/sw/impl_test.go b/bccsp/sw/impl_test.go index 2802a9e295a..7a028f4bb3f 100644 --- a/bccsp/sw/impl_test.go +++ b/bccsp/sw/impl_test.go @@ -1374,7 +1374,7 @@ func TestRSAKeyGenEphemeral(t *testing.T) { t.Fatalf("Failed generating RSA corresponding public key [%s]", err) } if pk == nil { - t.Fatal("PK must be diffrent from nil") + t.Fatal("PK must be different from nil") } b, err := k.Bytes() diff --git a/bccsp/utils/keys.go b/bccsp/utils/keys.go index 971a5b7a5f2..4f3c95c1940 100644 --- a/bccsp/utils/keys.go +++ b/bccsp/utils/keys.go @@ -208,11 +208,11 @@ func DERToPrivateKey(der []byte) (key interface{}, err error) { // PEMtoPrivateKey unmarshals a pem to private key func PEMtoPrivateKey(raw []byte, pwd []byte) (interface{}, error) { if len(raw) == 0 { - return nil, errors.New("Invalid PEM. It must be diffrent from nil.") + return nil, errors.New("Invalid PEM. It must be different from nil.") } block, _ := pem.Decode(raw) if block == nil { - return nil, fmt.Errorf("Failed decoding PEM. Block must be diffrent from nil. [% x]", raw) + return nil, fmt.Errorf("Failed decoding PEM. Block must be different from nil. [% x]", raw) } // TODO: derive from header the type of the key @@ -244,7 +244,7 @@ func PEMtoPrivateKey(raw []byte, pwd []byte) (interface{}, error) { // PEMtoAES extracts from the PEM an AES key func PEMtoAES(raw []byte, pwd []byte) ([]byte, error) { if len(raw) == 0 { - return nil, errors.New("Invalid PEM. It must be diffrent from nil.") + return nil, errors.New("Invalid PEM. It must be different from nil.") } block, _ := pem.Decode(raw) if block == nil { @@ -404,7 +404,7 @@ func PublicKeyToEncryptedPEM(publicKey interface{}, pwd []byte) ([]byte, error) // PEMtoPublicKey unmarshals a pem to public key func PEMtoPublicKey(raw []byte, pwd []byte) (interface{}, error) { if len(raw) == 0 { - return nil, errors.New("Invalid PEM. It must be diffrent from nil.") + return nil, errors.New("Invalid PEM. 
It must be different from nil.") } block, _ := pem.Decode(raw) if block == nil { @@ -439,7 +439,7 @@ func PEMtoPublicKey(raw []byte, pwd []byte) (interface{}, error) { // DERToPublicKey unmarshals a der to public key func DERToPublicKey(raw []byte) (pub interface{}, err error) { if len(raw) == 0 { - return nil, errors.New("Invalid DER. It must be diffrent from nil.") + return nil, errors.New("Invalid DER. It must be different from nil.") } key, err := x509.ParsePKIXPublicKey(raw) diff --git a/bddtests/context_endorser.go b/bddtests/context_endorser.go index f25788a9ad7..a2b7bd6e5bd 100644 --- a/bddtests/context_endorser.go +++ b/bddtests/context_endorser.go @@ -316,7 +316,7 @@ func (b *BDDContext) userExpectsProposalResponsesWithStatusFromEndorsers(enrollI var userRegistration *UserRegistration var keyedProposalResponseMap KeyedProposalResponseMap errRetFunc := func() error { - return fmt.Errorf("Error verifying proposal reponse '%s' for user '%s' with expected response code of '%s': %s", proposalResponseAlias, enrollID, respStatusCode, err) + return fmt.Errorf("Error verifying proposal response '%s' for user '%s' with expected response code of '%s': %s", proposalResponseAlias, enrollID, respStatusCode, err) } if userRegistration, err = b.GetUserRegistration(enrollID); err != nil { return errRetFunc() diff --git a/bddtests/regression/go/ote/ote.go b/bddtests/regression/go/ote/ote.go index 51de7f997aa..c6ab60b9119 100644 --- a/bddtests/regression/go/ote/ote.go +++ b/bddtests/regression/go/ote/ote.go @@ -54,33 +54,34 @@ package main // + return a pass/fail result and a result summary string import ( - "fmt" - "os" - "strings" - "strconv" - "math" - "os/exec" - "log" - "time" - "sync" - genesisconfigProvisional "github.com/hyperledger/fabric/common/configtx/tool/provisional" - genesisconfig "github.com/hyperledger/fabric/common/configtx/tool/localconfig" // config for genesis.yaml - "github.com/hyperledger/fabric/orderer/localconfig" // config, for the orderer.yaml - cb "github.com/hyperledger/fabric/protos/common" - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" - "golang.org/x/net/context" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" + "fmt" + "log" + "math" + "os" + "os/exec" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + genesisconfig "github.com/hyperledger/fabric/common/configtx/tool/localconfig" // config for genesis.yaml + genesisconfigProvisional "github.com/hyperledger/fabric/common/configtx/tool/provisional" + "github.com/hyperledger/fabric/orderer/localconfig" // config, for the orderer.yaml + cb "github.com/hyperledger/fabric/protos/common" + ab "github.com/hyperledger/fabric/protos/orderer" + "github.com/hyperledger/fabric/protos/utils" + "golang.org/x/net/context" + "google.golang.org/grpc" ) var ordConf *config.TopLevel var genConf *genesisconfig.TopLevel var genesisConfigLocation = "CONFIGTX_ORDERER_" var ordererConfigLocation = "ORDERER_GENERAL_" -var batchSizeParamStr = genesisConfigLocation+"BATCHSIZE_MAXMESSAGECOUNT" -var batchTimeoutParamStr = genesisConfigLocation+"BATCHTIMEOUT" -var ordererTypeParamStr = genesisConfigLocation+"ORDERERTYPE" +var batchSizeParamStr = genesisConfigLocation + "BATCHSIZE_MAXMESSAGECOUNT" +var batchTimeoutParamStr = genesisConfigLocation + "BATCHTIMEOUT" +var ordererTypeParamStr = genesisConfigLocation + "ORDERERTYPE" var debugflagLaunch = false var debugflagAPI = true @@ -94,7 +95,7 @@ var logEnabled = false var envvar string var 
numChannels = 1 -var numOrdsInNtwk = 1 +var numOrdsInNtwk = 1 var numOrdsToWatch = 1 var ordererType = "solo" var numKBrokers int @@ -124,980 +125,1083 @@ var optimizeClientsMode = false var ordStartPort uint16 = 5005 func initialize() { - // When running multiple tests, e.g. from go test, reset to defaults - // for the parameters that could change per test. - // We do NOT reset things that would apply to every test, such as - // settings for environment variables - logEnabled = false - envvar = "" - numChannels = 1 - numOrdsInNtwk = 1 - numOrdsToWatch = 1 - ordererType = "solo" - numKBrokers = 0 - numConsumers = 1 - numProducers = 1 - numTxToSend = 1 - producersPerCh = 1 - initLogger("ote") + // When running multiple tests, e.g. from go test, reset to defaults + // for the parameters that could change per test. + // We do NOT reset things that would apply to every test, such as + // settings for environment variables + logEnabled = false + envvar = "" + numChannels = 1 + numOrdsInNtwk = 1 + numOrdsToWatch = 1 + ordererType = "solo" + numKBrokers = 0 + numConsumers = 1 + numProducers = 1 + numTxToSend = 1 + producersPerCh = 1 + initLogger("ote") } func initLogger(fileName string) { - if !logEnabled { - layout := "Jan_02_2006" - // Format Now with the layout const. - t := time.Now() - res := t.Format(layout) - var err error - logFile, err = os.OpenFile(fileName+"-"+res+".log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(fmt.Sprintf("error opening file: %s", err)) - } - logEnabled = true - log.SetOutput(logFile) - //log.SetFlags(log.LstdFlags | log.Lshortfile) - log.SetFlags(log.LstdFlags) - } + if !logEnabled { + layout := "Jan_02_2006" + // Format Now with the layout const. + t := time.Now() + res := t.Format(layout) + var err error + logFile, err = os.OpenFile(fileName+"-"+res+".log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + panic(fmt.Sprintf("error opening file: %s", err)) + } + logEnabled = true + log.SetOutput(logFile) + //log.SetFlags(log.LstdFlags | log.Lshortfile) + log.SetFlags(log.LstdFlags) + } } func logger(printStmt string) { - fmt.Println(printStmt) - if !logEnabled { - return - } - log.Println(printStmt) + fmt.Println(printStmt) + if !logEnabled { + return + } + log.Println(printStmt) } func closeLogger() { - if logFile != nil { - logFile.Close() - } - logEnabled = false + if logFile != nil { + logFile.Close() + } + logEnabled = false } type ordererdriveClient struct { - client ab.AtomicBroadcast_DeliverClient - chainID string + client ab.AtomicBroadcast_DeliverClient + chainID string } type broadcastClient struct { - client ab.AtomicBroadcast_BroadcastClient - chainID string + client ab.AtomicBroadcast_BroadcastClient + chainID string } + func newOrdererdriveClient(client ab.AtomicBroadcast_DeliverClient, chainID string) *ordererdriveClient { - return &ordererdriveClient{client: client, chainID: chainID} + return &ordererdriveClient{client: client, chainID: chainID} } func newBroadcastClient(client ab.AtomicBroadcast_BroadcastClient, chainID string) *broadcastClient { - return &broadcastClient{client: client, chainID: chainID} + return &broadcastClient{client: client, chainID: chainID} } func seekHelper(chainID string, start *ab.SeekPosition) *cb.Envelope { - return &cb.Envelope{ - Payload: utils.MarshalOrPanic(&cb.Payload{ - Header: &cb.Header{ - //ChainHeader: &cb.ChainHeader{ - // ChainID: b.chainID, - //}, - ChannelHeader: &cb.ChannelHeader{ - ChannelId: chainID, - }, - SignatureHeader: &cb.SignatureHeader{}, - }, - - Data: 
utils.MarshalOrPanic(&ab.SeekInfo{ - Start: &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}}, - Stop: &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: math.MaxUint64}}}, - Behavior: ab.SeekInfo_BLOCK_UNTIL_READY, - }), - }), - } + return &cb.Envelope{ + Payload: utils.MarshalOrPanic(&cb.Payload{ + Header: &cb.Header{ + //ChainHeader: &cb.ChainHeader{ + // ChainID: b.chainID, + //}, + ChannelHeader: &cb.ChannelHeader{ + ChannelId: chainID, + }, + SignatureHeader: &cb.SignatureHeader{}, + }, + + Data: utils.MarshalOrPanic(&ab.SeekInfo{ + Start: &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}}, + Stop: &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: math.MaxUint64}}}, + Behavior: ab.SeekInfo_BLOCK_UNTIL_READY, + }), + }), + } } func (r *ordererdriveClient) seekOldest() error { - return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}})) + return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}})) } func (r *ordererdriveClient) seekNewest() error { - return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Newest{Newest: &ab.SeekNewest{}}})) + return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Newest{Newest: &ab.SeekNewest{}}})) } func (r *ordererdriveClient) seek(blockNumber uint64) error { - return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: blockNumber}}})) + return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: blockNumber}}})) } func (r *ordererdriveClient) readUntilClose(ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64) { - for { - msg, err := r.client.Recv() - if err != nil { - if !strings.Contains(err.Error(),"transport is closing") { - // print if we do not see the msg indicating graceful closing of the connection - logger(fmt.Sprintf("Consumer for orderer %d channel %d readUntilClose() Recv error: %v", ordererIndex, channelIndex, err)) - } - return - } - switch t := msg.Type.(type) { - case *ab.DeliverResponse_Status: - logger(fmt.Sprintf("Got DeliverResponse_Status: %v", t)) - return - case *ab.DeliverResponse_Block: - if t.Block.Header.Number > 0 { - if debugflag2 { logger(fmt.Sprintf("Consumer recvd a block, o %d c %d blkNum %d numtrans %d", ordererIndex, channelIndex, t.Block.Header.Number, len(t.Block.Data.Data))) } - if debugflag3 { logger(fmt.Sprintf("blk: %v", t.Block.Data.Data)) } - } - *txRecvCntrP += int64(len(t.Block.Data.Data)) - //*blockRecvCntrP = int64(t.Block.Header.Number) // this assumes header number is the block number; instead let's just add one - (*blockRecvCntrP)++ - } - } + for { + msg, err := r.client.Recv() + if err != nil { + if !strings.Contains(err.Error(), "transport is closing") { + // print if we do not see the msg indicating graceful closing of the connection + logger(fmt.Sprintf("Consumer for orderer %d channel %d readUntilClose() Recv error: %v", ordererIndex, channelIndex, err)) + } + return + } + switch t := msg.Type.(type) { + case *ab.DeliverResponse_Status: + logger(fmt.Sprintf("Got DeliverResponse_Status: %v", t)) + return + case *ab.DeliverResponse_Block: + if t.Block.Header.Number > 0 { + if debugflag2 { + logger(fmt.Sprintf("Consumer recvd a block, o %d c %d 
blkNum %d numtrans %d", ordererIndex, channelIndex, t.Block.Header.Number, len(t.Block.Data.Data))) + } + if debugflag3 { + logger(fmt.Sprintf("blk: %v", t.Block.Data.Data)) + } + } + *txRecvCntrP += int64(len(t.Block.Data.Data)) + //*blockRecvCntrP = int64(t.Block.Header.Number) // this assumes header number is the block number; instead let's just add one + (*blockRecvCntrP)++ + } + } } func (b *broadcastClient) broadcast(transaction []byte) error { - payload, err := proto.Marshal(&cb.Payload{ - Header: &cb.Header{ - //ChainHeader: &cb.ChainHeader{ - // ChainID: b.chainID, - //}, - ChannelHeader: &cb.ChannelHeader{ - ChannelId: b.chainID, - }, - SignatureHeader: &cb.SignatureHeader{}, - }, - Data: transaction, - }) - if err != nil { - panic(err) - } - return b.client.Send(&cb.Envelope{Payload: payload}) + payload, err := proto.Marshal(&cb.Payload{ + Header: &cb.Header{ + //ChainHeader: &cb.ChainHeader{ + // ChainID: b.chainID, + //}, + ChannelHeader: &cb.ChannelHeader{ + ChannelId: b.chainID, + }, + SignatureHeader: &cb.SignatureHeader{}, + }, + Data: transaction, + }) + if err != nil { + panic(err) + } + return b.client.Send(&cb.Envelope{Payload: payload}) } func (b *broadcastClient) getAck() error { - msg, err := b.client.Recv() - if err != nil { - return err - } - if msg.Status != cb.Status_SUCCESS { - return fmt.Errorf("Got unexpected status: %v", msg.Status) - } - return nil + msg, err := b.client.Recv() + if err != nil { + return err + } + if msg.Status != cb.Status_SUCCESS { + return fmt.Errorf("Got unexpected status: %v", msg.Status) + } + return nil } func startConsumer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64, consumerConnP **grpc.ClientConn) { - conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) - if err != nil { - logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] connecting (grpc) to %s, err: %v", ordererIndex, channelIndex, serverAddr, err)) - return - } - (*consumerConnP) = conn - client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO()) - if err != nil { - logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, channelIndex, serverAddr, err)) - return - } - s := newOrdererdriveClient(client, chainID) - err = s.seekOldest() - if err == nil { - if debugflag1 { logger(fmt.Sprintf("Started Consumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, channelIndex, serverAddr, chainID)) } - } else { - logger(fmt.Sprintf("ERROR starting Consumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, channelIndex, serverAddr, chainID, err)) - } - s.readUntilClose(ordererIndex, channelIndex, txRecvCntrP, blockRecvCntrP) + conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) + if err != nil { + logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] connecting (grpc) to %s, err: %v", ordererIndex, channelIndex, serverAddr, err)) + return + } + (*consumerConnP) = conn + client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO()) + if err != nil { + logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, channelIndex, serverAddr, err)) + return + } + s := newOrdererdriveClient(client, chainID) + err = s.seekOldest() + if err == nil { + if debugflag1 { + logger(fmt.Sprintf("Started Consumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, channelIndex, 
serverAddr, chainID)) + } + } else { + logger(fmt.Sprintf("ERROR starting Consumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, channelIndex, serverAddr, chainID, err)) + } + s.readUntilClose(ordererIndex, channelIndex, txRecvCntrP, blockRecvCntrP) } func startConsumerMaster(serverAddr string, chainIDsP *[]string, ordererIndex int, txRecvCntrsP *[]int64, blockRecvCntrsP *[]int64, consumerConnP **grpc.ClientConn) { - // create one conn to the orderer and share it for communications to all channels - conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) - if err != nil { - logger(fmt.Sprintf("Error on MasterConsumer ord[%d] connecting (grpc) to %s, err: %v", ordererIndex, serverAddr, err)) - return - } - (*consumerConnP) = conn - - // create an orderer driver client for every channel on this orderer - //[][]*ordererdriveClient // numChannels - dc := make ([]*ordererdriveClient, numChannels) - for c := 0; c < numChannels; c++ { - client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO()) - if err != nil { - logger(fmt.Sprintf("Error on MasterConsumer ord[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, serverAddr, err)) - return - } - dc[c] = newOrdererdriveClient(client, (*chainIDsP)[c]) - err = dc[c].seekOldest() - if err == nil { - if debugflag1 { logger(fmt.Sprintf("Started MasterConsumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, c, serverAddr, (*chainIDsP)[c])) } - } else { - logger(fmt.Sprintf("ERROR starting MasterConsumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, c, serverAddr, (*chainIDsP)[c], err)) - } - // we would prefer to skip these go threads, and just have on "readUntilClose" that looks for deliveries on all channels!!! (see below.) - // otherwise, what have we really saved? - go dc[c].readUntilClose(ordererIndex, c, &((*txRecvCntrsP)[c]), &((*blockRecvCntrsP)[c])) - } + // create one conn to the orderer and share it for communications to all channels + conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) + if err != nil { + logger(fmt.Sprintf("Error on MasterConsumer ord[%d] connecting (grpc) to %s, err: %v", ordererIndex, serverAddr, err)) + return + } + (*consumerConnP) = conn + + // create an orderer driver client for every channel on this orderer + //[][]*ordererdriveClient // numChannels + dc := make([]*ordererdriveClient, numChannels) + for c := 0; c < numChannels; c++ { + client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO()) + if err != nil { + logger(fmt.Sprintf("Error on MasterConsumer ord[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, serverAddr, err)) + return + } + dc[c] = newOrdererdriveClient(client, (*chainIDsP)[c]) + err = dc[c].seekOldest() + if err == nil { + if debugflag1 { + logger(fmt.Sprintf("Started MasterConsumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, c, serverAddr, (*chainIDsP)[c])) + } + } else { + logger(fmt.Sprintf("ERROR starting MasterConsumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, c, serverAddr, (*chainIDsP)[c], err)) + } + // we would prefer to skip these go threads, and just have one "readUntilClose" that looks for deliveries on all channels!!! (see below.) + // otherwise, what have we really saved?
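+ // Each reader goroutine below receives pointers to its own per-channel
+ // counter elements, so readers on different channels update disjoint counters.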
+ go dc[c].readUntilClose(ordererIndex, c, &((*txRecvCntrsP)[c]), &((*blockRecvCntrsP)[c])) + } } func executeCmd(cmd string) []byte { - out, err := exec.Command("/bin/sh", "-c", cmd).Output() - if (err != nil) { - logger(fmt.Sprintf("Unsuccessful exec command: "+cmd+"\nstdout="+string(out)+"\nstderr=%v", err)) - log.Fatal(err) - } - return out + out, err := exec.Command("/bin/sh", "-c", cmd).Output() + if err != nil { + logger(fmt.Sprintf("Unsuccessful exec command: "+cmd+"\nstdout="+string(out)+"\nstderr=%v", err)) + log.Fatal(err) + } + return out } func executeCmdAndDisplay(cmd string) { - out := executeCmd(cmd) - logger("Results of exec command: "+cmd+"\nstdout="+string(out)) + out := executeCmd(cmd) + logger("Results of exec command: " + cmd + "\nstdout=" + string(out)) } func connClose(consumerConnsPP **([][]*grpc.ClientConn)) { - for i := 0; i < numOrdsToWatch; i++ { - for j := 0; j < numChannels; j++ { - if (**consumerConnsPP)[i][j] != nil { - _ = (**consumerConnsPP)[i][j].Close() - } - } - } + for i := 0; i < numOrdsToWatch; i++ { + for j := 0; j < numChannels; j++ { + if (**consumerConnsPP)[i][j] != nil { + _ = (**consumerConnsPP)[i][j].Close() + } + } + } } func cleanNetwork(consumerConnsP *([][]*grpc.ClientConn)) { - if debugflag1 { logger("Removing the Network Consumers") } - connClose(&consumerConnsP) - - // Docker is not perfect; we need to unpause any paused containers, before we can kill them. - //_ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause") - if out := executeCmd("docker ps -aq -f status=paused"); out != nil && string(out) != "" { - logger("Removing paused docker containers: " + string(out)) - _ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause") - } - - // kill any containers that are still running - //_ = executeCmd("docker kill $(docker ps -q)") - - if debugflag1 { logger("Removing the Network orderers and associated docker containers") } - _ = executeCmd("docker rm -f $(docker ps -aq)") + if debugflag1 { + logger("Removing the Network Consumers") + } + connClose(&consumerConnsP) + + // Docker is not perfect; we need to unpause any paused containers, before we can kill them. 
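+ // Query for paused containers first, so the unpause pipeline below only
+ // runs when xargs has real container IDs to hand to "docker unpause".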
+ //_ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause") + if out := executeCmd("docker ps -aq -f status=paused"); out != nil && string(out) != "" { + logger("Removing paused docker containers: " + string(out)) + _ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause") + } + + // kill any containers that are still running + //_ = executeCmd("docker kill $(docker ps -q)") + + if debugflag1 { + logger("Removing the Network orderers and associated docker containers") + } + _ = executeCmd("docker rm -f $(docker ps -aq)") } func launchNetwork(appendFlags string) { - // Alternative way: hardcoded docker compose (not driver.sh tool) - // _ = executeCmd("docker-compose -f docker-compose-3orderers.yml up -d") - - cmd := fmt.Sprintf("./driver.sh -a create -p 1 %s", appendFlags) - logger(fmt.Sprintf("Launching network: %s", cmd)) - if debugflagLaunch { - executeCmdAndDisplay(cmd) // show stdout logs; debugging help - } else { - executeCmd(cmd) - } - - // display the network of docker containers with the orderers and such - executeCmdAndDisplay("docker ps -a") + // Alternative way: hardcoded docker compose (not driver.sh tool) + // _ = executeCmd("docker-compose -f docker-compose-3orderers.yml up -d") + + cmd := fmt.Sprintf("./driver.sh -a create -p 1 %s", appendFlags) + logger(fmt.Sprintf("Launching network: %s", cmd)) + if debugflagLaunch { + executeCmdAndDisplay(cmd) // show stdout logs; debugging help + } else { + executeCmd(cmd) + } + + // display the network of docker containers with the orderers and such + executeCmdAndDisplay("docker ps -a") } func countGenesis() int64 { - return int64(numChannels) + return int64(numChannels) } func sendEqualRecv(numTxToSend int64, totalTxRecvP *[]int64, totalTxRecvMismatch bool, totalBlockRecvMismatch bool) bool { - var matching = false; - if (*totalTxRecvP)[0] == numTxToSend { - // recv count on orderer 0 matches the send count - if !totalTxRecvMismatch && !totalBlockRecvMismatch { - // all orderers have same recv counters - matching = true - } - } - return matching + var matching = false + if (*totalTxRecvP)[0] == numTxToSend { + // recv count on orderer 0 matches the send count + if !totalTxRecvMismatch && !totalBlockRecvMismatch { + // all orderers have same recv counters + matching = true + } + } + return matching } func moreDeliveries(txSentP *[][]int64, totalNumTxSentP *int64, txSentFailuresP *[][]int64, totalNumTxSentFailuresP *int64, txRecvP *[][]int64, totalTxRecvP *[]int64, totalTxRecvMismatchP *bool, blockRecvP *[][]int64, totalBlockRecvP *[]int64, totalBlockRecvMismatchP *bool) (moreReceived bool) { - moreReceived = false - prevTotalTxRecv := *totalTxRecvP - computeTotals(txSentP, totalNumTxSentP, txSentFailuresP, totalNumTxSentFailuresP, txRecvP, totalTxRecvP, totalTxRecvMismatchP, blockRecvP, totalBlockRecvP, totalBlockRecvMismatchP) - for ordNum := 0; ordNum < numOrdsToWatch; ordNum++ { - if prevTotalTxRecv[ordNum] != (*totalTxRecvP)[ordNum] { moreReceived = true } - } - return moreReceived + moreReceived = false + prevTotalTxRecv := *totalTxRecvP + computeTotals(txSentP, totalNumTxSentP, txSentFailuresP, totalNumTxSentFailuresP, txRecvP, totalTxRecvP, totalTxRecvMismatchP, blockRecvP, totalBlockRecvP, totalBlockRecvMismatchP) + for ordNum := 0; ordNum < numOrdsToWatch; ordNum++ { + if prevTotalTxRecv[ordNum] != (*totalTxRecvP)[ordNum] { + moreReceived = true + } + } + return moreReceived } func startProducer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txReq int64, txSentCntrP 
*int64, txSentFailureCntrP *int64) { - conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) - defer func() { - _ = conn.Close() - }() - if err != nil { - logger(fmt.Sprintf("Error creating connection for Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err)) - return - } - client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO()) - if err != nil { - logger(fmt.Sprintf("Error creating Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err)) - return - } - if debugflag1 { logger(fmt.Sprintf("Started Producer to send %d TXs to ord[%d] ch[%d] srvr=%s chID=%s, %v", txReq, ordererIndex, channelIndex, serverAddr, chainID, time.Now())) } - b := newBroadcastClient(client, chainID) - - // print a log after sending mulitples of this percentage of requested TX: 25,50,75%... - // only on one producer, and assume all producers are generating at same rate. - // e.g. when txReq = 50, to print log every 10. set progressPercentage = 20 - printProgressLogs := false - var progressPercentage int64 = 25 // set this between 1 and 99 + conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) + defer func() { + _ = conn.Close() + }() + if err != nil { + logger(fmt.Sprintf("Error creating connection for Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err)) + return + } + client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO()) + if err != nil { + logger(fmt.Sprintf("Error creating Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err)) + return + } + if debugflag1 { + logger(fmt.Sprintf("Started Producer to send %d TXs to ord[%d] ch[%d] srvr=%s chID=%s, %v", txReq, ordererIndex, channelIndex, serverAddr, chainID, time.Now())) + } + b := newBroadcastClient(client, chainID) + + // print a log after sending multiples of this percentage of requested TX: 25,50,75%... + // only on one producer, and assume all producers are generating at same rate. + // e.g. when txReq = 50, to print log every 10. 
set progressPercentage = 20 + printProgressLogs := false + var progressPercentage int64 = 25 // set this between 1 and 99 printLogCnt := txReq * progressPercentage / 100 - if debugflag1 { - printProgressLogs = true // to test logs for all producers - } else { - if txReq > 10000 && printLogCnt > 0 && ordererIndex==0 && channelIndex==0 { - printProgressLogs = true - } - } - var mult int64 = 0 - - firstErr := false - for i := int64(0); i < txReq ; i++ { - b.broadcast([]byte(fmt.Sprintf("Testing %v", time.Now()))) - err = b.getAck() - if err == nil { - (*txSentCntrP)++ - if printProgressLogs && (*txSentCntrP)%printLogCnt == 0 { - mult++ - if debugflag1 { - logger(fmt.Sprintf("Producer ord[%d] ch[%d] sent %4d /%4d = %3d%%, %v", ordererIndex, channelIndex, (*txSentCntrP), txReq, progressPercentage*mult, time.Now())) - } else { - logger(fmt.Sprintf("Sent %3d%%, %v", progressPercentage*mult, time.Now())) - } - } - } else { - (*txSentFailureCntrP)++ - if !firstErr { - firstErr = true - logger(fmt.Sprintf("Broadcast error on TX %d (the first error for Producer ord[%d] ch[%d]); err: %v", i+1, ordererIndex, channelIndex, err)) - } - } - } - if err != nil { - logger(fmt.Sprintf("Broadcast error on last TX %d of Producer ord[%d] ch[%d]: %v", txReq, ordererIndex, channelIndex, err)) - } - if txReq == *txSentCntrP { - if debugflag1 { logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs %9d (100%%) , %v", ordererIndex, channelIndex, *txSentCntrP, time.Now())) } - } else { - logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs %9d NACK %d Other %d , %v", ordererIndex, channelIndex, *txSentCntrP, *txSentFailureCntrP, txReq - *txSentFailureCntrP - *txSentCntrP, time.Now())) - } - producersWG.Done() + if debugflag1 { + printProgressLogs = true // to test logs for all producers + } else { + if txReq > 10000 && printLogCnt > 0 && ordererIndex == 0 && channelIndex == 0 { + printProgressLogs = true + } + } + var mult int64 = 0 + + firstErr := false + for i := int64(0); i < txReq; i++ { + b.broadcast([]byte(fmt.Sprintf("Testing %v", time.Now()))) + err = b.getAck() + if err == nil { + (*txSentCntrP)++ + if printProgressLogs && (*txSentCntrP)%printLogCnt == 0 { + mult++ + if debugflag1 { + logger(fmt.Sprintf("Producer ord[%d] ch[%d] sent %4d /%4d = %3d%%, %v", ordererIndex, channelIndex, (*txSentCntrP), txReq, progressPercentage*mult, time.Now())) + } else { + logger(fmt.Sprintf("Sent %3d%%, %v", progressPercentage*mult, time.Now())) + } + } + } else { + (*txSentFailureCntrP)++ + if !firstErr { + firstErr = true + logger(fmt.Sprintf("Broadcast error on TX %d (the first error for Producer ord[%d] ch[%d]); err: %v", i+1, ordererIndex, channelIndex, err)) + } + } + } + if err != nil { + logger(fmt.Sprintf("Broadcast error on last TX %d of Producer ord[%d] ch[%d]: %v", txReq, ordererIndex, channelIndex, err)) + } + if txReq == *txSentCntrP { + if debugflag1 { + logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs %9d (100%%) , %v", ordererIndex, channelIndex, *txSentCntrP, time.Now())) + } + } else { + logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs %9d NACK %d Other %d , %v", ordererIndex, channelIndex, *txSentCntrP, *txSentFailureCntrP, txReq-*txSentFailureCntrP-*txSentCntrP, time.Now())) + } + producersWG.Done() } func startProducerMaster(serverAddr string, chainIDs *[]string, ordererIndex int, txReqP *[]int64, txSentCntrP *[]int64, txSentFailureCntrP *[]int64) { - // This function 
creates a grpc connection to one orderer, - // creates multiple clients (one per numChannels) for that one orderer, - // and sends a TX to all channels repeatedly until no more to send. - var txReqTotal int64 - var txMax int64 - for c := 0; c < numChannels; c++ { - txReqTotal += (*txReqP)[c] - if txMax < (*txReqP)[c] { txMax = (*txReqP)[c] } - } - conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) - defer func() { - _ = conn.Close() - }() - if err != nil { - logger(fmt.Sprintf("Error creating connection for MasterProducer for ord[%d], err: %v", ordererIndex, err)) - return - } - client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO()) - if err != nil { - logger(fmt.Sprintf("Error creating MasterProducer for ord[%d], err: %v", ordererIndex, err)) - return - } - logger(fmt.Sprintf("Started MasterProducer to send %d TXs to ord[%d] srvr=%s distributed across all channels", txReqTotal, ordererIndex, serverAddr)) - - // create the broadcast clients for every channel on this orderer - bc := make ([]*broadcastClient, numChannels) - for c := 0; c < numChannels; c++ { - bc[c] = newBroadcastClient(client, (*chainIDs)[c]) - } - - firstErr := false - for i := int64(0); i < txMax; i++ { - // send one TX to every broadcast client (one TX on each chnl) - for c := 0; c < numChannels; c++ { - if i < (*txReqP)[c] { - // more TXs to send on this channel - bc[c].broadcast([]byte(fmt.Sprintf("Testing %v", time.Now()))) - err = bc[c].getAck() - if err == nil { - (*txSentCntrP)[c]++ - } else { - (*txSentFailureCntrP)[c]++ - if !firstErr { - firstErr = true - logger(fmt.Sprintf("Broadcast error on TX %d (the first error for MasterProducer on ord[%d] ch[%d] channelID=%s); err: %v", i+1, ordererIndex, c, (*chainIDs)[c], err)) - } - } - } - } - } - if err != nil { - logger(fmt.Sprintf("Broadcast error on last TX %d of MasterProducer on ord[%d] ch[%d]: %v", txReqTotal, ordererIndex, numChannels-1, err)) - } - var txSentTotal int64 - var txSentFailTotal int64 - for c := 0; c < numChannels; c++ { - txSentTotal += (*txSentCntrP)[c] - txSentFailTotal += (*txSentFailureCntrP)[c] - } - if txReqTotal == txSentTotal { - logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs %9d (100%%)", ordererIndex, txSentTotal)) - } else { - logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs %9d NACK %d Other %d", ordererIndex, txSentTotal, txSentFailTotal, txReqTotal - txSentTotal - txSentFailTotal)) - } - producersWG.Done() + // This function creates a grpc connection to one orderer, + // creates multiple clients (one per numChannels) for that one orderer, + // and sends a TX to all channels repeatedly until no more to send. 
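+ // txReqTotal feeds the summary totals; txMax, the largest per-channel
+ // request count, bounds the round-robin send loop further below.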
+ var txReqTotal int64 + var txMax int64 + for c := 0; c < numChannels; c++ { + txReqTotal += (*txReqP)[c] + if txMax < (*txReqP)[c] { + txMax = (*txReqP)[c] + } + } + conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) + defer func() { + _ = conn.Close() + }() + if err != nil { + logger(fmt.Sprintf("Error creating connection for MasterProducer for ord[%d], err: %v", ordererIndex, err)) + return + } + client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO()) + if err != nil { + logger(fmt.Sprintf("Error creating MasterProducer for ord[%d], err: %v", ordererIndex, err)) + return + } + logger(fmt.Sprintf("Started MasterProducer to send %d TXs to ord[%d] srvr=%s distributed across all channels", txReqTotal, ordererIndex, serverAddr)) + + // create the broadcast clients for every channel on this orderer + bc := make([]*broadcastClient, numChannels) + for c := 0; c < numChannels; c++ { + bc[c] = newBroadcastClient(client, (*chainIDs)[c]) + } + + firstErr := false + for i := int64(0); i < txMax; i++ { + // send one TX to every broadcast client (one TX on each chnl) + for c := 0; c < numChannels; c++ { + if i < (*txReqP)[c] { + // more TXs to send on this channel + bc[c].broadcast([]byte(fmt.Sprintf("Testing %v", time.Now()))) + err = bc[c].getAck() + if err == nil { + (*txSentCntrP)[c]++ + } else { + (*txSentFailureCntrP)[c]++ + if !firstErr { + firstErr = true + logger(fmt.Sprintf("Broadcast error on TX %d (the first error for MasterProducer on ord[%d] ch[%d] channelID=%s); err: %v", i+1, ordererIndex, c, (*chainIDs)[c], err)) + } + } + } + } + } + if err != nil { + logger(fmt.Sprintf("Broadcast error on last TX %d of MasterProducer on ord[%d] ch[%d]: %v", txReqTotal, ordererIndex, numChannels-1, err)) + } + var txSentTotal int64 + var txSentFailTotal int64 + for c := 0; c < numChannels; c++ { + txSentTotal += (*txSentCntrP)[c] + txSentFailTotal += (*txSentFailureCntrP)[c] + } + if txReqTotal == txSentTotal { + logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs %9d (100%%)", ordererIndex, txSentTotal)) + } else { + logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs %9d NACK %d Other %d", ordererIndex, txSentTotal, txSentFailTotal, txReqTotal-txSentTotal-txSentFailTotal)) + } + producersWG.Done() } func computeTotals(txSent *[][]int64, totalNumTxSent *int64, txSentFailures *[][]int64, totalNumTxSentFailures *int64, txRecv *[][]int64, totalTxRecv *[]int64, totalTxRecvMismatch *bool, blockRecv *[][]int64, totalBlockRecv *[]int64, totalBlockRecvMismatch *bool) { - // The counters for Producers are indexed by orderer (numOrdsInNtwk) - // and channel (numChannels). - // Total count includes all counters for all channels on ALL orderers. - // e.g. totalNumTxSent = sum of txSent[*][*] - // e.g. totalNumTxSentFailures = sum of txSentFailures[*][*] - - *totalNumTxSent = 0 - *totalNumTxSentFailures = 0 - for i := 0; i < numOrdsInNtwk; i++ { - for j := 0; j < numChannels; j++ { - *totalNumTxSent += (*txSent)[i][j] - *totalNumTxSentFailures += (*txSentFailures)[i][j] - } - } - - // Counters for consumers are indexed by orderer (numOrdsToWatch) - // and channel (numChannels). - // The total count includes all counters for all channels on - // ONLY ONE orderer. - // Tally up the totals for all the channels on each orderer, and - // store them for comparison; they should all be the same. - // e.g. totalTxRecv[k] = sum of txRecv[k][*] - // e.g. 
totalBlockRecv[k] = sum of blockRecv[k][*] - - *totalTxRecvMismatch = false - *totalBlockRecvMismatch = false - for k := 0; k < numOrdsToWatch; k++ { - // count only the requested TXs - not the genesis block TXs - (*totalTxRecv)[k] = -countGenesis() - (*totalBlockRecv)[k] = -countGenesis() - for l := 0; l < numChannels; l++ { - (*totalTxRecv)[k] += (*txRecv)[k][l] - (*totalBlockRecv)[k] += (*blockRecv)[k][l] - if debugflag3 { logger(fmt.Sprintf("in compute(): k %d l %d txRecv[k][l] %d blockRecv[k][l] %d", k , l , (*txRecv)[k][l] , (*blockRecv)[k][l] )) } - } - if (k>0) && (*totalTxRecv)[k] != (*totalTxRecv)[k-1] { *totalTxRecvMismatch = true } - if (k>0) && (*totalBlockRecv)[k] != (*totalBlockRecv)[k-1] { *totalBlockRecvMismatch = true } - } - if debugflag2 { logger(fmt.Sprintf("in compute(): totalTxRecv[]= %v, totalBlockRecv[]= %v", *totalTxRecv, *totalBlockRecv)) } + // The counters for Producers are indexed by orderer (numOrdsInNtwk) + // and channel (numChannels). + // Total count includes all counters for all channels on ALL orderers. + // e.g. totalNumTxSent = sum of txSent[*][*] + // e.g. totalNumTxSentFailures = sum of txSentFailures[*][*] + + *totalNumTxSent = 0 + *totalNumTxSentFailures = 0 + for i := 0; i < numOrdsInNtwk; i++ { + for j := 0; j < numChannels; j++ { + *totalNumTxSent += (*txSent)[i][j] + *totalNumTxSentFailures += (*txSentFailures)[i][j] + } + } + + // Counters for consumers are indexed by orderer (numOrdsToWatch) + // and channel (numChannels). + // The total count includes all counters for all channels on + // ONLY ONE orderer. + // Tally up the totals for all the channels on each orderer, and + // store them for comparison; they should all be the same. + // e.g. totalTxRecv[k] = sum of txRecv[k][*] + // e.g. totalBlockRecv[k] = sum of blockRecv[k][*] + + *totalTxRecvMismatch = false + *totalBlockRecvMismatch = false + for k := 0; k < numOrdsToWatch; k++ { + // count only the requested TXs - not the genesis block TXs + (*totalTxRecv)[k] = -countGenesis() + (*totalBlockRecv)[k] = -countGenesis() + for l := 0; l < numChannels; l++ { + (*totalTxRecv)[k] += (*txRecv)[k][l] + (*totalBlockRecv)[k] += (*blockRecv)[k][l] + if debugflag3 { + logger(fmt.Sprintf("in compute(): k %d l %d txRecv[k][l] %d blockRecv[k][l] %d", k, l, (*txRecv)[k][l], (*blockRecv)[k][l])) + } + } + if (k > 0) && (*totalTxRecv)[k] != (*totalTxRecv)[k-1] { + *totalTxRecvMismatch = true + } + if (k > 0) && (*totalBlockRecv)[k] != (*totalBlockRecv)[k-1] { + *totalBlockRecvMismatch = true + } + } + if debugflag2 { + logger(fmt.Sprintf("in compute(): totalTxRecv[]= %v, totalBlockRecv[]= %v", *totalTxRecv, *totalBlockRecv)) + } } func reportTotals(testname string, numTxToSendTotal int64, countToSend [][]int64, txSent [][]int64, totalNumTxSent int64, txSentFailures [][]int64, totalNumTxSentFailures int64, batchSize int64, txRecv [][]int64, totalTxRecv []int64, totalTxRecvMismatch bool, blockRecv [][]int64, totalBlockRecv []int64, totalBlockRecvMismatch bool, masterSpy bool, channelIDs *[]string) (successResult bool, resultStr string) { - // default to failed - var passFailStr = "FAILED" - successResult = false - resultStr = "TEST " + testname + " " - - // For each Producer, print the ordererIndex and channelIndex, the - // number of TX requested to be sent, the actual number of TX sent, - // and the number we failed to send. 
- - if numOrdsInNtwk > 3 || numChannels > 3 { - logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx; and any others ONLY IF they contain failures.\nTotals numOrdInNtwk=%d numChan=%d numPRODUCERs=%d", numOrdsInNtwk, numChannels, numOrdsInNtwk*numChannels)) - } - logger("PRODUCERS OrdererIdx ChannelIdx ChannelID TX Target ACK NACK") - for i := 0; i < numOrdsInNtwk; i++ { - for j := 0; j < numChannels; j++ { - if (i < 3 && j < 3) || txSentFailures[i][j] > 0 || countToSend[i][j] != txSent[i][j] + txSentFailures[i][j] { - logger(fmt.Sprintf("%22d%12d %-20s%12d%12d%12d",i,j,(*channelIDs)[j],countToSend[i][j],txSent[i][j],txSentFailures[i][j])) - } else if (i < 3 && j == 3) { - logger(fmt.Sprintf("%34s","...")) - } else if (i == 3 && j == 0) { - logger(fmt.Sprintf("%22s","...")) - } - } - } - - // for each consumer print the ordererIndex & channel, the num blocks and the num transactions received/delivered - if numOrdsToWatch > 3 || numChannels > 3 { - logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx (and the last ordererIdx if masterSpy is present), plus any others that contain failures.\nTotals numOrdIdx=%d numChanIdx=%d numCONSUMERS=%d", numOrdsToWatch, numChannels, numOrdsToWatch*numChannels)) - } - logger("CONSUMERS OrdererIdx ChannelIdx ChannelID TXs Batches") - for i := 0; i < numOrdsToWatch; i++ { - for j := 0; j < numChannels; j++ { - if (j < 3 && (i < 3 || (masterSpy && i==numOrdsInNtwk-1))) || (i>1 && (blockRecv[i][j] != blockRecv[1][j] || txRecv[1][j] != txRecv[1][j])) { - // Subtract one from the received Block count and TX count, to ignore the genesis block - // (we already ignore genesis blocks when we compute the totals in totalTxRecv[n] , totalBlockRecv[n]) - logger(fmt.Sprintf("%22d%12d %-20s%12d%12d",i,j,(*channelIDs)[j],txRecv[i][j]-1,blockRecv[i][j]-1)) - } else if (i < 3 && j == 3) { - logger(fmt.Sprintf("%34s","...")) - } else if (i == 3 && j == 0) { - logger(fmt.Sprintf("%22s","...")) - } - } - } - - // Check for differences on the deliveries from the orderers. These are - // probably errors - unless the test stopped an orderer on purpose and - // never restarted it, while the others continued to deliver TXs. - // (If an orderer is restarted, then it would reprocess all the - // back-ordered transactions to catch up with the others.) - - if totalTxRecvMismatch { logger("!!!!! Num TXs Delivered is not same on all orderers!!!!!") } - if totalBlockRecvMismatch { logger("!!!!! Num Blocks Delivered is not same on all orderers!!!!!") } - - if totalTxRecvMismatch || totalBlockRecvMismatch { - resultStr += "Orderers were INCONSISTENT! " - } - if totalTxRecv[0] == numTxToSendTotal { - // recv count on orderer 0 matches the send count - if !totalTxRecvMismatch && !totalBlockRecvMismatch { - logger("Hooray! Every TX was successfully sent AND delivered by orderer service.") - successResult = true - passFailStr = "PASSED" - } else { - resultStr += "Every TX was successfully sent AND delivered by orderer0 but not all orderers" - } - } else if totalTxRecv[0] == totalNumTxSent { - resultStr += "Every ACked TX was delivered, but failures occurred:" - } else if totalTxRecv[0] < totalNumTxSent { - resultStr += "BAD! Some ACKed TX were LOST by orderer service!" - } else { - resultStr += "BAD! Some EXTRA TX were delivered by orderer service!" - } - - //////////////////////////////////////////////////////////////////////// - // - // Before we declare success, let's check some more things... 
- // - // At this point, we have decided if most of the numbers make sense by - // setting succssResult to true if the tests passed. Thus we assume - // successReult=true and just set it to false if we find a problem. - - // Check the totals to verify if the number of blocks on each channel - // is appropriate for the given batchSize and number of TXs sent. - - expectedBlocksOnChan := make([]int64, numChannels) // create a counter for all the channels on one orderer - for c := 0; c < numChannels; c++ { - var chanSentTotal int64 - for ord := 0; ord < numOrdsInNtwk; ord++ { - chanSentTotal += txSent[ord][c] - } - expectedBlocksOnChan[c] = chanSentTotal / batchSize - if chanSentTotal % batchSize > 0 { expectedBlocksOnChan[c]++ } - for ord := 0; ord < numOrdsToWatch; ord++ { - if expectedBlocksOnChan[c] != blockRecv[ord][c] - 1 { // ignore genesis block - successResult = false - passFailStr = "FAILED" - logger(fmt.Sprintf("Error: Unexpected Block count %d (expected %d) on ordIndx=%d channelIDs[%d]=%s, chanSentTxTotal=%d BatchSize=%d", blockRecv[ord][c]-1, expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize)) - } else { - if debugflag1 { logger(fmt.Sprintf("GOOD block count %d on ordIndx=%d channelIDs[%d]=%s chanSentTxTotal=%d BatchSize=%d", expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize)) } - } - } - } - - - // TODO - Verify the contents of the last block of transactions. - // Since we do not know exactly what should be in the block, - // then at least we can do: - // for each channel, verify if the block delivered from - // each orderer is the same (i.e. contains the same - // Data bytes (transactions) in the last block) - - - // print some counters totals - logger(fmt.Sprintf("Not counting genesis blks (1 per chan)%9d", countGenesis())) - logger(fmt.Sprintf("Total TX broadcasts Requested to Send %9d", numTxToSendTotal)) - logger(fmt.Sprintf("Total TX broadcasts send success ACK %9d", totalNumTxSent)) - logger(fmt.Sprintf("Total TX broadcasts sendFailed - NACK %9d", totalNumTxSentFailures)) - logger(fmt.Sprintf("Total Send-LOST TX (Not Ack or Nack)) %9d", numTxToSendTotal - totalNumTxSent - totalNumTxSentFailures )) - logger(fmt.Sprintf("Total Recv-LOST TX (Ack but not Recvd)%9d", totalNumTxSent - totalTxRecv[0] )) - if successResult { - logger(fmt.Sprintf("Total deliveries received TX %9d", totalTxRecv[0])) - logger(fmt.Sprintf("Total deliveries received Blocks %9d", totalBlockRecv[0])) - } else { - logger(fmt.Sprintf("Total deliveries received TX on each ordrr %7d", totalTxRecv)) - logger(fmt.Sprintf("Total deliveries received Blocks on each ordrr %7d", totalBlockRecv)) - } - - // print output result and counts : overall summary - resultStr += fmt.Sprintf(" RESULT=%s: TX Req=%d BrdcstACK=%d NACK=%d DelivBlk=%d DelivTX=%d numChannels=%d batchSize=%d", passFailStr, numTxToSendTotal, totalNumTxSent, totalNumTxSentFailures, totalBlockRecv, totalTxRecv, numChannels, batchSize) - logger(fmt.Sprintf(resultStr)) - - return successResult, resultStr + // default to failed + var passFailStr = "FAILED" + successResult = false + resultStr = "TEST " + testname + " " + + // For each Producer, print the ordererIndex and channelIndex, the + // number of TX requested to be sent, the actual number of TX sent, + // and the number we failed to send. 
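+ // A row is also printed whenever countToSend != ACKs+NACKs, i.e. whenever
+ // some broadcasts were lost without receiving either an ACK or a NACK.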
+ + if numOrdsInNtwk > 3 || numChannels > 3 { + logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx; and any others ONLY IF they contain failures.\nTotals numOrdInNtwk=%d numChan=%d numPRODUCERs=%d", numOrdsInNtwk, numChannels, numOrdsInNtwk*numChannels)) + } + logger("PRODUCERS OrdererIdx ChannelIdx ChannelID TX Target ACK NACK") + for i := 0; i < numOrdsInNtwk; i++ { + for j := 0; j < numChannels; j++ { + if (i < 3 && j < 3) || txSentFailures[i][j] > 0 || countToSend[i][j] != txSent[i][j]+txSentFailures[i][j] { + logger(fmt.Sprintf("%22d%12d %-20s%12d%12d%12d", i, j, (*channelIDs)[j], countToSend[i][j], txSent[i][j], txSentFailures[i][j])) + } else if i < 3 && j == 3 { + logger(fmt.Sprintf("%34s", "...")) + } else if i == 3 && j == 0 { + logger(fmt.Sprintf("%22s", "...")) + } + } + } + + // for each consumer print the ordererIndex & channel, the num blocks and the num transactions received/delivered + if numOrdsToWatch > 3 || numChannels > 3 { + logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx (and the last ordererIdx if masterSpy is present), plus any others that contain failures.\nTotals numOrdIdx=%d numChanIdx=%d numCONSUMERS=%d", numOrdsToWatch, numChannels, numOrdsToWatch*numChannels)) + } + logger("CONSUMERS OrdererIdx ChannelIdx ChannelID TXs Batches") + for i := 0; i < numOrdsToWatch; i++ { + for j := 0; j < numChannels; j++ { + if (j < 3 && (i < 3 || (masterSpy && i == numOrdsInNtwk-1))) || (i > 1 && (blockRecv[i][j] != blockRecv[1][j] || txRecv[i][j] != txRecv[1][j])) { + // Subtract one from the received Block count and TX count, to ignore the genesis block + // (we already ignore genesis blocks when we compute the totals in totalTxRecv[n] , totalBlockRecv[n]) + logger(fmt.Sprintf("%22d%12d %-20s%12d%12d", i, j, (*channelIDs)[j], txRecv[i][j]-1, blockRecv[i][j]-1)) + } else if i < 3 && j == 3 { + logger(fmt.Sprintf("%34s", "...")) + } else if i == 3 && j == 0 { + logger(fmt.Sprintf("%22s", "...")) + } + } + } + + // Check for differences on the deliveries from the orderers. These are + // probably errors - unless the test stopped an orderer on purpose and + // never restarted it, while the others continued to deliver TXs. + // (If an orderer is restarted, then it would reprocess all the + // back-ordered transactions to catch up with the others.) + + if totalTxRecvMismatch { + logger("!!!!! Num TXs Delivered is not same on all orderers!!!!!") + } + if totalBlockRecvMismatch { + logger("!!!!! Num Blocks Delivered is not same on all orderers!!!!!") + } + + if totalTxRecvMismatch || totalBlockRecvMismatch { + resultStr += "Orderers were INCONSISTENT! " + } + if totalTxRecv[0] == numTxToSendTotal { + // recv count on orderer 0 matches the send count + if !totalTxRecvMismatch && !totalBlockRecvMismatch { + logger("Hooray! Every TX was successfully sent AND delivered by orderer service.") + successResult = true + passFailStr = "PASSED" + } else { + resultStr += "Every TX was successfully sent AND delivered by orderer0 but not all orderers" + } + } else if totalTxRecv[0] == totalNumTxSent { + resultStr += "Every ACked TX was delivered, but failures occurred:" + } else if totalTxRecv[0] < totalNumTxSent { + resultStr += "BAD! Some ACKed TX were LOST by orderer service!" + } else { + resultStr += "BAD! Some EXTRA TX were delivered by orderer service!" + } + + //////////////////////////////////////////////////////////////////////// + // + // Before we declare success, let's check some more things... 
+ // + // At this point, we have decided if most of the numbers make sense by + // setting successResult to true if the tests passed. Thus we assume + // successResult=true and just set it to false if we find a problem. + + // Check the totals to verify if the number of blocks on each channel + // is appropriate for the given batchSize and number of TXs sent. + + expectedBlocksOnChan := make([]int64, numChannels) // create a counter for all the channels on one orderer + for c := 0; c < numChannels; c++ { + var chanSentTotal int64 + for ord := 0; ord < numOrdsInNtwk; ord++ { + chanSentTotal += txSent[ord][c] + } + expectedBlocksOnChan[c] = chanSentTotal / batchSize + if chanSentTotal%batchSize > 0 { + expectedBlocksOnChan[c]++ + } + for ord := 0; ord < numOrdsToWatch; ord++ { + if expectedBlocksOnChan[c] != blockRecv[ord][c]-1 { // ignore genesis block + successResult = false + passFailStr = "FAILED" + logger(fmt.Sprintf("Error: Unexpected Block count %d (expected %d) on ordIndx=%d channelIDs[%d]=%s, chanSentTxTotal=%d BatchSize=%d", blockRecv[ord][c]-1, expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize)) + } else { + if debugflag1 { + logger(fmt.Sprintf("GOOD block count %d on ordIndx=%d channelIDs[%d]=%s chanSentTxTotal=%d BatchSize=%d", expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize)) + } + } + } + } + + // TODO - Verify the contents of the last block of transactions. + // Since we do not know exactly what should be in the block, + // then at least we can do: + // for each channel, verify if the block delivered from + // each orderer is the same (i.e. contains the same + // Data bytes (transactions) in the last block) + + // print some counters totals + logger(fmt.Sprintf("Not counting genesis blks (1 per chan)%9d", countGenesis())) + logger(fmt.Sprintf("Total TX broadcasts Requested to Send %9d", numTxToSendTotal)) + logger(fmt.Sprintf("Total TX broadcasts send success ACK %9d", totalNumTxSent)) + logger(fmt.Sprintf("Total TX broadcasts sendFailed - NACK %9d", totalNumTxSentFailures)) + logger(fmt.Sprintf("Total Send-LOST TX (Not Ack or Nack)) %9d", numTxToSendTotal-totalNumTxSent-totalNumTxSentFailures)) + logger(fmt.Sprintf("Total Recv-LOST TX (Ack but not Recvd)%9d", totalNumTxSent-totalTxRecv[0])) + if successResult { + logger(fmt.Sprintf("Total deliveries received TX %9d", totalTxRecv[0])) + logger(fmt.Sprintf("Total deliveries received Blocks %9d", totalBlockRecv[0])) + } else { + logger(fmt.Sprintf("Total deliveries received TX on each ordrr %7d", totalTxRecv)) + logger(fmt.Sprintf("Total deliveries received Blocks on each ordrr %7d", totalBlockRecv)) + } + + // print output result and counts : overall summary + resultStr += fmt.Sprintf(" RESULT=%s: TX Req=%d BrdcstACK=%d NACK=%d DelivBlk=%d DelivTX=%d numChannels=%d batchSize=%d", passFailStr, numTxToSendTotal, totalNumTxSent, totalNumTxSentFailures, totalBlockRecv, totalTxRecv, numChannels, batchSize) + logger(fmt.Sprintf(resultStr)) + + return successResult, resultStr } // Function: ote - the Orderer Test Engine // Outputs: print report to stdout with lots of counters // Returns: passed bool, resultSummary string -func ote( testname string, txs int64, chans int, orderers int, ordType string, kbs int, masterSpy bool, pPerCh int ) (passed bool, resultSummary string) { - - initialize() // multiple go tests could be run; we must call initialize() each time - - passed = false - resultSummary = testname + " test not completed: INPUT ERROR: " - defer closeLogger() - - 
logger(fmt.Sprintf("========== OTE testname=%s TX=%d Channels=%d Orderers=%d ordererType=%s kafka-brokers=%d addMasterSpy=%t producersPerCh=%d", testname, txs, chans, orderers, ordType, kbs, masterSpy, pPerCh)) - - // Establish the default configuration from yaml files - and this also - // picks up any variables overridden on command line or in environment - ordConf = config.Load() - genConf = genesisconfig.Load() - var launchAppendFlags string - - //////////////////////////////////////////////////////////////////////// - // Check parameters and/or env vars to see if user wishes to override - // default config parms. - //////////////////////////////////////////////////////////////////////// - - ////////////////////////////////////////////////////////////////////// - // Arguments for OTE settings for test variations: - ////////////////////////////////////////////////////////////////////// - - if txs > 0 { numTxToSend = txs } else { return passed, resultSummary + "number of transactions must be > 0" } - if chans > 0 { numChannels = chans } else { return passed, resultSummary + "number of channels must be > 0" } - if orderers > 0 { - numOrdsInNtwk = orderers - launchAppendFlags += fmt.Sprintf(" -o %d", orderers) - } else { return passed, resultSummary + "number of orderers in network must be > 0" } - - if pPerCh > 1 { - producersPerCh = pPerCh - return passed, resultSummary + "Multiple producersPerChannel NOT SUPPORTED yet." - } - - numOrdsToWatch = numOrdsInNtwk // Watch every orderer to verify they are all delivering the same. - if masterSpy { numOrdsToWatch++ } // We are not creating another orderer here, but we do need - // another set of counters; the masterSpy will be created for - // this test to watch every channel on an orderer - so that means - // one orderer is being watched twice - - // this is not an argument, but user may set this tuning parameter before running test - envvar = os.Getenv("OTE_CLIENTS_SHARE_CONNS") - if envvar != "" { - if (strings.ToLower(envvar) == "true" || strings.ToLower(envvar) == "t") { - optimizeClientsMode = true - } - if debugflagAPI { - logger(fmt.Sprintf("%-50s %s=%t", "OTE_CLIENTS_SHARE_CONNS="+envvar, "optimizeClientsMode", optimizeClientsMode)) - logger("Setting OTE_CLIENTS_SHARE_CONNS option to true does the following:\n1. All Consumers on an orderer (one GO thread per each channel) will share grpc connection.\n2. 
-		}
-	}
-	if optimizeClientsMode {
-		// use only one MasterProducer and one MasterConsumer on each orderer
-		numProducers = numOrdsInNtwk
-		numConsumers = numOrdsInNtwk
-	} else {
-		// one Producer and one Consumer for EVERY channel on each orderer
-		numProducers = numOrdsInNtwk * numChannels
-		numConsumers = numOrdsInNtwk * numChannels
-	}
-
-	//////////////////////////////////////////////////////////////////////
-	// Arguments to override configuration parameter values in yaml file:
-	//////////////////////////////////////////////////////////////////////
-
-	// ordererType is an argument of ote(), and is also in the genesisconfig
-	ordererType = genConf.Orderer.OrdererType
-	if ordType != "" {
-		ordererType = ordType
-	} else {
-		logger(fmt.Sprintf("Null value provided for ordererType; using value from config file: %s", ordererType))
-	}
-	launchAppendFlags += fmt.Sprintf(" -t %s", ordererType)
-	if "kafka" == strings.ToLower(ordererType) {
-		if kbs > 0 {
-			numKBrokers = kbs
-			launchAppendFlags += fmt.Sprintf(" -k %d", numKBrokers)
-		} else {
-			return passed, resultSummary + "When using kafka ordererType, number of kafka-brokers must be > 0"
-		}
-	} else { numKBrokers = 0 }
-
-	// batchSize is not an argument of ote(), but is in the genesisconfig
-	// variable may be overridden on command line or by exporting it.
-	batchSize := int64(genConf.Orderer.BatchSize.MaxMessageCount) // retype the uint32
-	envvar = os.Getenv(batchSizeParamStr)
-	if envvar != "" { launchAppendFlags += fmt.Sprintf(" -b %d", batchSize) }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", batchSizeParamStr+"="+envvar, "batchSize", batchSize)) }
-
-	// batchTimeout is not an argument of ote(), but is in the genesisconfig
-	//logger(fmt.Sprintf("DEBUG=====BatchTimeout conf:%v Seconds-float():%v Seconds-int:%v", genConf.Orderer.BatchTimeout, (genConf.Orderer.BatchTimeout).Seconds(), int((genConf.Orderer.BatchTimeout).Seconds())))
-	batchTimeout := int((genConf.Orderer.BatchTimeout).Seconds()) // Seconds() converts time.Duration to float64, and then retypecast to int
-	envvar = os.Getenv(batchTimeoutParamStr)
-	if envvar != "" { launchAppendFlags += fmt.Sprintf(" -c %d", batchTimeout) }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", batchTimeoutParamStr+"="+envvar, "batchTimeout", batchTimeout)) }
-
-	// CoreLoggingLevel
-	envvar = strings.ToUpper(os.Getenv("CORE_LOGGING_LEVEL")) // (default = not set)|CRITICAL|ERROR|WARNING|NOTICE|INFO|DEBUG
-	if envvar != "" {
-		launchAppendFlags += fmt.Sprintf(" -l %s", envvar)
-	}
-	if debugflagAPI { logger(fmt.Sprintf("CORE_LOGGING_LEVEL=%s", envvar)) }
-
-	// CoreLedgerStateDB
-	envvar = os.Getenv("CORE_LEDGER_STATE_STATEDATABASE") // goleveldb | CouchDB
-	if envvar != "" {
-		launchAppendFlags += fmt.Sprintf(" -d %s", envvar)
-	}
-	if debugflagAPI { logger(fmt.Sprintf("CORE_LEDGER_STATE_STATEDATABASE=%s", envvar)) }
-
-	// CoreSecurityLevel
-	envvar = os.Getenv("CORE_SECURITY_LEVEL") // 256 | 384
-	if envvar != "" {
-		launchAppendFlags += fmt.Sprintf(" -w %s", envvar)
-	}
-	if debugflagAPI { logger(fmt.Sprintf("CORE_SECURITY_LEVEL=%s", envvar)) }
-
-	// CoreSecurityHashAlgorithm
-	envvar = os.Getenv("CORE_SECURITY_HASHALGORITHM") // SHA2 | SHA3
-	if envvar != "" {
-		launchAppendFlags += fmt.Sprintf(" -x %s", envvar)
-	}
-	if debugflagAPI { logger(fmt.Sprintf("CORE_SECURITY_HASHALGORITHM=%s", envvar)) }
-
-
-	//////////////////////////////////////////////////////////////////////////
-	// Each producer sends TXs to one channel on one orderer, and increments
-	// its own counters for the successfully sent Tx, and the send-failures
-	// (rejected/timeout). These arrays are indexed by dimensions:
-	// numOrdsInNtwk and numChannels
-
-	var countToSend [][]int64
-	var txSent [][]int64
-	var txSentFailures [][]int64
-	var totalNumTxSent int64
-	var totalNumTxSentFailures int64
-
-	// Each consumer receives blocks delivered on one channel from one
-	// orderer, and must track its own counters for the received number of
-	// blocks and received number of Tx.
-	// We will create consumers for every channel on an orderer, and total
-	// up the TXs received. And do that for all the orderers (indexed by
-	// numOrdsToWatch). We will check to ensure all the orderers receive
-	// all the same deliveries. These arrays are indexed by dimensions:
-	// numOrdsToWatch and numChannels
-
-	var txRecv [][]int64
-	var blockRecv [][]int64
-	var totalTxRecv []int64 // total TXs rcvd by all consumers on an orderer, indexed by numOrdsToWatch
-	var totalBlockRecv []int64 // total Blks recvd by all consumers on an orderer, indexed by numOrdsToWatch
-	var totalTxRecvMismatch = false
-	var totalBlockRecvMismatch = false
-	var consumerConns [][]*grpc.ClientConn
-
-	////////////////////////////////////////////////////////////////////////
-	// Create the 1D and 2D slices of counters for the producers and
-	// consumers. All are initialized to zero.
-
-	for i := 0; i < numOrdsInNtwk; i++ { // for all orderers
-
-		countToSendForOrd := make([]int64, numChannels) // create a counter for all the channels on one orderer
-		countToSend = append(countToSend, countToSendForOrd) // orderer-i gets a set
-
-		sendPassCntrs := make([]int64, numChannels) // create a counter for all the channels on one orderer
-		txSent = append(txSent, sendPassCntrs) // orderer-i gets a set
-
-		sendFailCntrs := make([]int64, numChannels) // create a counter for all the channels on one orderer
-		txSentFailures = append(txSentFailures, sendFailCntrs) // orderer-i gets a set
-	}
-
-	for i := 0; i < numOrdsToWatch; i++ { // for all orderers which we will watch/monitor for deliveries
-
-		blockRecvCntrs := make([]int64, numChannels) // create a set of block counters for each channel
-		blockRecv = append(blockRecv, blockRecvCntrs) // orderer-i gets a set
-
-		txRecvCntrs := make([]int64, numChannels) // create a set of tx counters for each channel
-		txRecv = append(txRecv, txRecvCntrs) // orderer-i gets a set
-
-		consumerRow := make([]*grpc.ClientConn, numChannels)
-		consumerConns = append(consumerConns, consumerRow)
-	}
-
-	totalTxRecv = make([]int64, numOrdsToWatch) // create counter for each orderer, for total tx received (for all channels)
-	totalBlockRecv = make([]int64, numOrdsToWatch) // create counter for each orderer, for total blk received (for all channels)
-
-
-	////////////////////////////////////////////////////////////////////////
-
-	launchNetwork(launchAppendFlags)
-	time.Sleep(10 * time.Second)
-
-	////////////////////////////////////////////////////////////////////////
-	// Create the 1D slice of channel IDs, and create names for them
-	// which we will use when producing/broadcasting/sending msgs and
-	// consuming/delivering/receiving msgs.
-
-	var channelIDs []string
-	channelIDs = make([]string, numChannels)
-
-	// TODO (after FAB-2001 and FAB-2083 are fixed) - Remove the if-then clause.
-	// Due to those bugs, we cannot pass many tests using multiple orderers and multiple channels.
-	// TEMPORARY PARTIAL SOLUTION: To test multiple orderers with a single channel,
-	// use hardcoded TestChainID and skip creating any channels.
-	if numChannels == 1 {
-		channelIDs[0] = genesisconfigProvisional.TestChainID
-		logger(fmt.Sprintf("Using DEFAULT channelID = %s", channelIDs[0]))
-	} else {
-		logger(fmt.Sprintf("Using %d new channelIDs, e.g. test-chan.00023", numChannels))
-		for c:=0; c < numChannels; c++ {
-			channelIDs[c] = fmt.Sprintf("test-chan.%05d", c)
-			cmd := fmt.Sprintf("cd $GOPATH/src/github.com/hyperledger/fabric && CORE_PEER_COMMITTER_LEDGER_ORDERER=127.0.0.1:%d peer channel create -c %s", ordStartPort, channelIDs[c])
-			_ = executeCmd(cmd)
-			//executeCmdAndDisplay(cmd)
-		}
-	}
-
-	////////////////////////////////////////////////////////////////////////
-	// Start threads for each consumer to watch each channel on all (the
-	// specified number of) orderers. This code assumes orderers in the
-	// network will use increasing port numbers, which is the same logic
-	// used by the driver.sh tool that starts the network for us: the first
-	// orderer uses ordStartPort, the second uses ordStartPort+1, etc.
-
-	for ord := 0; ord < numOrdsToWatch; ord++ {
-		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort + uint16(ord))
-		if masterSpy && ord == numOrdsToWatch-1 {
-			// Special case: this is the last row of counters,
-			// added (and incremented numOrdsToWatch) for the
-			// masterSpy to use to watch the first orderer for
-			// deliveries, on all channels. This will be a duplicate
-			// Consumer (it is the second one monitoring the first
-			// orderer), so we need to reuse the first port.
-			serverAddr = fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort)
-			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
-		} else
-		if optimizeClientsMode {
-			// Create just one Consumer to receive all deliveries
-			// (on all channels) on an orderer.
-			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
-		} else {
-			// Normal mode: create a unique consumer client
-			// go-thread for each channel on each orderer.
-			for c := 0 ; c < numChannels ; c++ {
-				go startConsumer(serverAddr, channelIDs[c], ord, c, &(txRecv[ord][c]), &(blockRecv[ord][c]), &(consumerConns[ord][c]))
-			}
-		}
-
-	}
-
-	logger("Finished creating all CONSUMERS clients")
-	time.Sleep(5 * time.Second)
-	defer cleanNetwork(&consumerConns)
-
-	////////////////////////////////////////////////////////////////////////
-	// Now that the orderer service network is running, and the consumers
-	// are watching for deliveries, we can start clients which will
-	// broadcast the specified number of TXs to their associated orderers.
-
-	if optimizeClientsMode {
-		producersWG.Add(numOrdsInNtwk)
-	} else {
-		producersWG.Add(numProducers)
-	}
-	sendStart := time.Now().Unix()
-	for ord := 0; ord < numOrdsInNtwk; ord++ {
-		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort + uint16(ord))
-		for c := 0 ; c < numChannels ; c++ {
-			countToSend[ord][c] = numTxToSend / int64(numOrdsInNtwk * numChannels)
-			if c==0 && ord==0 { countToSend[ord][c] += numTxToSend % int64(numOrdsInNtwk * numChannels) }
-		}
-		if optimizeClientsMode {
-			// create one Producer for all channels on this orderer
-			go startProducerMaster(serverAddr, &channelIDs, ord, &(countToSend[ord]), &(txSent[ord]), &(txSentFailures[ord]))
-		} else {
-			// Normal mode: create a unique consumer client
-			// go thread for each channel
-			for c := 0 ; c < numChannels ; c++ {
-				go startProducer(serverAddr, channelIDs[c], ord, c, countToSend[ord][c], &(txSent[ord][c]), &(txSentFailures[ord][c]))
-			}
-		}
-	}
-
-	if optimizeClientsMode {
-		logger(fmt.Sprintf("Finished creating all %d MASTER-PRODUCERs", numOrdsInNtwk))
-	} else {
-		logger(fmt.Sprintf("Finished creating all %d PRODUCERs", numOrdsInNtwk * numChannels))
-	}
-	producersWG.Wait()
-	logger(fmt.Sprintf("Send Duration (seconds): %4d", time.Now().Unix() - sendStart))
-	recoverStart := time.Now().Unix()
-
-	////////////////////////////////////////////////////////////////////////
-	// All producer threads are finished sending broadcast transactions.
-	// Let's determine if the deliveries have all been received by the
-	// consumer threads. We will check if the receive counts match the send
-	// counts on all consumers, or if all consumers are no longer receiving
-	// blocks. Wait and continue rechecking as necessary, as long as the
-	// delivery (recv) counters are climbing closer to the broadcast (send)
-	// counter. If the counts do not match, wait for up to batchTimeout
-	// seconds, to ensure that we received the last (non-full) batch.
-
-	computeTotals(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch)
-
-	waitSecs := 0
-	for !sendEqualRecv(numTxToSend, &totalTxRecv, totalTxRecvMismatch, totalBlockRecvMismatch) && (moreDeliveries(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch) || waitSecs < batchTimeout) { time.Sleep(1 * time.Second); waitSecs++ }
-
-	// Recovery Duration = time spent waiting for orderer service to finish delivering transactions,
-	// after all producers finished sending them.
-	// waitSecs = some possibly idle time spent waiting for the last batch to be generated (waiting for batchTimeout)
-	logger(fmt.Sprintf("Recovery Duration (secs):%4d", time.Now().Unix() - recoverStart))
-	logger(fmt.Sprintf("waitSecs for last batch: %4d", waitSecs))
-	passed, resultSummary = reportTotals(testname, numTxToSend, countToSend, txSent, totalNumTxSent, txSentFailures, totalNumTxSentFailures, batchSize, txRecv, totalTxRecv, totalTxRecvMismatch, blockRecv, totalBlockRecv, totalBlockRecvMismatch, masterSpy, &channelIDs)
-
-	return passed, resultSummary
+func ote(testname string, txs int64, chans int, orderers int, ordType string, kbs int, masterSpy bool, pPerCh int) (passed bool, resultSummary string) {
+
+	initialize() // multiple go tests could be run; we must call initialize() each time
+
+	passed = false
+	resultSummary = testname + " test not completed: INPUT ERROR: "
+	defer closeLogger()
+
+	logger(fmt.Sprintf("========== OTE testname=%s TX=%d Channels=%d Orderers=%d ordererType=%s kafka-brokers=%d addMasterSpy=%t producersPerCh=%d", testname, txs, chans, orderers, ordType, kbs, masterSpy, pPerCh))
+
+	// Establish the default configuration from yaml files - and this also
+	// picks up any variables overridden on command line or in environment
+	ordConf = config.Load()
+	genConf = genesisconfig.Load()
+	var launchAppendFlags string
+
+	////////////////////////////////////////////////////////////////////////
+	// Check parameters and/or env vars to see if user wishes to override
+	// default config parms.
+	////////////////////////////////////////////////////////////////////////
+
+	//////////////////////////////////////////////////////////////////////
+	// Arguments for OTE settings for test variations:
+	//////////////////////////////////////////////////////////////////////
+
+	if txs > 0 {
+		numTxToSend = txs
+	} else {
+		return passed, resultSummary + "number of transactions must be > 0"
+	}
+	if chans > 0 {
+		numChannels = chans
+	} else {
+		return passed, resultSummary + "number of channels must be > 0"
+	}
+	if orderers > 0 {
+		numOrdsInNtwk = orderers
+		launchAppendFlags += fmt.Sprintf(" -o %d", orderers)
+	} else {
+		return passed, resultSummary + "number of orderers in network must be > 0"
+	}
+
+	if pPerCh > 1 {
+		producersPerCh = pPerCh
+		return passed, resultSummary + "Multiple producersPerChannel NOT SUPPORTED yet."
+	}
+
+	numOrdsToWatch = numOrdsInNtwk // Watch every orderer to verify they are all delivering the same.
+	if masterSpy {
+		numOrdsToWatch++
+	} // We are not creating another orderer here, but we do need
+	// another set of counters; the masterSpy will be created for
+	// this test to watch every channel on an orderer - so that means
+	// one orderer is being watched twice
+
+	// this is not an argument, but user may set this tuning parameter before running test
+	envvar = os.Getenv("OTE_CLIENTS_SHARE_CONNS")
+	if envvar != "" {
+		if strings.ToLower(envvar) == "true" || strings.ToLower(envvar) == "t" {
+			optimizeClientsMode = true
+		}
+		if debugflagAPI {
+			logger(fmt.Sprintf("%-50s %s=%t", "OTE_CLIENTS_SHARE_CONNS="+envvar, "optimizeClientsMode", optimizeClientsMode))
+			logger("Setting OTE_CLIENTS_SHARE_CONNS option to true does the following:\n1. All Consumers on an orderer (one GO thread per each channel) will share grpc connection.\n2. All Producers on an orderer will share a grpc conn AND share one GO-thread.\nAlthough this reduces concurrency and lengthens the test duration, it satisfies\nthe objective of reducing swap space requirements and should be selected when\nrunning tests with numerous channels or producers per channel.")
+		}
+	}
+	if optimizeClientsMode {
+		// use only one MasterProducer and one MasterConsumer on each orderer
+		numProducers = numOrdsInNtwk
+		numConsumers = numOrdsInNtwk
+	} else {
+		// one Producer and one Consumer for EVERY channel on each orderer
+		numProducers = numOrdsInNtwk * numChannels
+		numConsumers = numOrdsInNtwk * numChannels
+	}
+
+	//////////////////////////////////////////////////////////////////////
+	// Arguments to override configuration parameter values in yaml file:
+	//////////////////////////////////////////////////////////////////////
+
+	// ordererType is an argument of ote(), and is also in the genesisconfig
+	ordererType = genConf.Orderer.OrdererType
+	if ordType != "" {
+		ordererType = ordType
+	} else {
+		logger(fmt.Sprintf("Null value provided for ordererType; using value from config file: %s", ordererType))
+	}
+	launchAppendFlags += fmt.Sprintf(" -t %s", ordererType)
+	if "kafka" == strings.ToLower(ordererType) {
+		if kbs > 0 {
+			numKBrokers = kbs
+			launchAppendFlags += fmt.Sprintf(" -k %d", numKBrokers)
+		} else {
+			return passed, resultSummary + "When using kafka ordererType, number of kafka-brokers must be > 0"
+		}
+	} else {
+		numKBrokers = 0
+	}
+
+	// batchSize is not an argument of ote(), but is in the genesisconfig
+	// variable may be overridden on command line or by exporting it.
+	batchSize := int64(genConf.Orderer.BatchSize.MaxMessageCount) // retype the uint32
+	envvar = os.Getenv(batchSizeParamStr)
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -b %d", batchSize)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", batchSizeParamStr+"="+envvar, "batchSize", batchSize))
+	}
+
+	// batchTimeout is not an argument of ote(), but is in the genesisconfig
+	//logger(fmt.Sprintf("DEBUG=====BatchTimeout conf:%v Seconds-float():%v Seconds-int:%v", genConf.Orderer.BatchTimeout, (genConf.Orderer.BatchTimeout).Seconds(), int((genConf.Orderer.BatchTimeout).Seconds())))
+	batchTimeout := int((genConf.Orderer.BatchTimeout).Seconds()) // Seconds() converts time.Duration to float64, and then retypecast to int
+	envvar = os.Getenv(batchTimeoutParamStr)
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -c %d", batchTimeout)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", batchTimeoutParamStr+"="+envvar, "batchTimeout", batchTimeout))
+	}
+
+	// CoreLoggingLevel
+	envvar = strings.ToUpper(os.Getenv("CORE_LOGGING_LEVEL")) // (default = not set)|CRITICAL|ERROR|WARNING|NOTICE|INFO|DEBUG
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -l %s", envvar)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("CORE_LOGGING_LEVEL=%s", envvar))
+	}
+
+	// CoreLedgerStateDB
+	envvar = os.Getenv("CORE_LEDGER_STATE_STATEDATABASE") // goleveldb | CouchDB
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -d %s", envvar)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("CORE_LEDGER_STATE_STATEDATABASE=%s", envvar))
+	}
+
+	// CoreSecurityLevel
+	envvar = os.Getenv("CORE_SECURITY_LEVEL") // 256 | 384
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -w %s", envvar)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("CORE_SECURITY_LEVEL=%s", envvar))
+	}
+
+	// CoreSecurityHashAlgorithm
+	envvar = os.Getenv("CORE_SECURITY_HASHALGORITHM") // SHA2 | SHA3
+	if envvar != "" {
+		launchAppendFlags += fmt.Sprintf(" -x %s", envvar)
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("CORE_SECURITY_HASHALGORITHM=%s", envvar))
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// Each producer sends TXs to one channel on one orderer, and increments
+	// its own counters for the successfully sent Tx, and the send-failures
+	// (rejected/timeout). These arrays are indexed by dimensions:
+	// numOrdsInNtwk and numChannels
+
+	var countToSend [][]int64
+	var txSent [][]int64
+	var txSentFailures [][]int64
+	var totalNumTxSent int64
+	var totalNumTxSentFailures int64
+
+	// Each consumer receives blocks delivered on one channel from one
+	// orderer, and must track its own counters for the received number of
+	// blocks and received number of Tx.
+	// We will create consumers for every channel on an orderer, and total
+	// up the TXs received. And do that for all the orderers (indexed by
+	// numOrdsToWatch). We will check to ensure all the orderers receive
+	// all the same deliveries. These arrays are indexed by dimensions:
+	// numOrdsToWatch and numChannels
+
+	var txRecv [][]int64
+	var blockRecv [][]int64
+	var totalTxRecv []int64 // total TXs rcvd by all consumers on an orderer, indexed by numOrdsToWatch
+	var totalBlockRecv []int64 // total Blks recvd by all consumers on an orderer, indexed by numOrdsToWatch
+	var totalTxRecvMismatch = false
+	var totalBlockRecvMismatch = false
+	var consumerConns [][]*grpc.ClientConn
+
+	////////////////////////////////////////////////////////////////////////
+	// Create the 1D and 2D slices of counters for the producers and
+	// consumers. All are initialized to zero.
+
+	for i := 0; i < numOrdsInNtwk; i++ { // for all orderers
+
+		countToSendForOrd := make([]int64, numChannels) // create a counter for all the channels on one orderer
+		countToSend = append(countToSend, countToSendForOrd) // orderer-i gets a set
+
+		sendPassCntrs := make([]int64, numChannels) // create a counter for all the channels on one orderer
+		txSent = append(txSent, sendPassCntrs) // orderer-i gets a set
+
+		sendFailCntrs := make([]int64, numChannels) // create a counter for all the channels on one orderer
+		txSentFailures = append(txSentFailures, sendFailCntrs) // orderer-i gets a set
+	}
+
+	for i := 0; i < numOrdsToWatch; i++ { // for all orderers which we will watch/monitor for deliveries
+
+		blockRecvCntrs := make([]int64, numChannels) // create a set of block counters for each channel
+		blockRecv = append(blockRecv, blockRecvCntrs) // orderer-i gets a set
+
+		txRecvCntrs := make([]int64, numChannels) // create a set of tx counters for each channel
+		txRecv = append(txRecv, txRecvCntrs) // orderer-i gets a set
+
+		consumerRow := make([]*grpc.ClientConn, numChannels)
+		consumerConns = append(consumerConns, consumerRow)
+	}
+
+	totalTxRecv = make([]int64, numOrdsToWatch) // create counter for each orderer, for total tx received (for all channels)
+	totalBlockRecv = make([]int64, numOrdsToWatch) // create counter for each orderer, for total blk received (for all channels)
+
+	////////////////////////////////////////////////////////////////////////
+
+	launchNetwork(launchAppendFlags)
+	time.Sleep(10 * time.Second)
+
+	////////////////////////////////////////////////////////////////////////
+	// Create the 1D slice of channel IDs, and create names for them
+	// which we will use when producing/broadcasting/sending msgs and
+	// consuming/delivering/receiving msgs.
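A minimal, runnable sketch (illustrative only, not lines of this patch; the helper name makeChannelIDs is hypothetical) of the naming scheme described in the comment above, matching the test-chan.%05d format and the single-channel TestChainID fallback used in the code below:

	package main

	import "fmt"

	// makeChannelIDs mirrors ote()'s logic: one hardcoded default ID for the
	// single-channel case, otherwise zero-padded generated names.
	func makeChannelIDs(numChannels int, testChainID string) []string {
		ids := make([]string, numChannels)
		if numChannels == 1 {
			ids[0] = testChainID // e.g. genesisconfigProvisional.TestChainID
			return ids
		}
		for c := range ids {
			ids[c] = fmt.Sprintf("test-chan.%05d", c)
		}
		return ids
	}

	func main() {
		fmt.Println(makeChannelIDs(3, "testchainid"))
		// Output: [test-chan.00000 test-chan.00001 test-chan.00002]
	}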
+
+	var channelIDs []string
+	channelIDs = make([]string, numChannels)
+
+	// TODO (after FAB-2001 and FAB-2083 are fixed) - Remove the if-then clause.
+	// Due to those bugs, we cannot pass many tests using multiple orderers and multiple channels.
+	// TEMPORARY PARTIAL SOLUTION: To test multiple orderers with a single channel,
+	// use hardcoded TestChainID and skip creating any channels.
+	if numChannels == 1 {
+		channelIDs[0] = genesisconfigProvisional.TestChainID
+		logger(fmt.Sprintf("Using DEFAULT channelID = %s", channelIDs[0]))
+	} else {
+		logger(fmt.Sprintf("Using %d new channelIDs, e.g. test-chan.00023", numChannels))
+		for c := 0; c < numChannels; c++ {
+			channelIDs[c] = fmt.Sprintf("test-chan.%05d", c)
+			cmd := fmt.Sprintf("cd $GOPATH/src/github.com/hyperledger/fabric && CORE_PEER_COMMITTER_LEDGER_ORDERER=127.0.0.1:%d peer channel create -c %s", ordStartPort, channelIDs[c])
+			_ = executeCmd(cmd)
+			//executeCmdAndDisplay(cmd)
+		}
+	}
+
+	////////////////////////////////////////////////////////////////////////
+	// Start threads for each consumer to watch each channel on all (the
+	// specified number of) orderers. This code assumes orderers in the
+	// network will use increasing port numbers, which is the same logic
+	// used by the driver.sh tool that starts the network for us: the first
+	// orderer uses ordStartPort, the second uses ordStartPort+1, etc.
+
+	for ord := 0; ord < numOrdsToWatch; ord++ {
+		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort+uint16(ord))
+		if masterSpy && ord == numOrdsToWatch-1 {
+			// Special case: this is the last row of counters,
+			// added (and incremented numOrdsToWatch) for the
+			// masterSpy to use to watch the first orderer for
+			// deliveries, on all channels. This will be a duplicate
+			// Consumer (it is the second one monitoring the first
+			// orderer), so we need to reuse the first port.
+			serverAddr = fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort)
+			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
+		} else if optimizeClientsMode {
+			// Create just one Consumer to receive all deliveries
+			// (on all channels) on an orderer.
+			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
+		} else {
+			// Normal mode: create a unique consumer client
+			// go-thread for each channel on each orderer.
+			for c := 0; c < numChannels; c++ {
+				go startConsumer(serverAddr, channelIDs[c], ord, c, &(txRecv[ord][c]), &(blockRecv[ord][c]), &(consumerConns[ord][c]))
+			}
+		}
+
+	}
+
+	logger("Finished creating all CONSUMERS clients")
+	time.Sleep(5 * time.Second)
+	defer cleanNetwork(&consumerConns)
+
+	////////////////////////////////////////////////////////////////////////
+	// Now that the orderer service network is running, and the consumers
+	// are watching for deliveries, we can start clients which will
+	// broadcast the specified number of TXs to their associated orderers.
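A minimal, runnable sketch (illustrative only, not lines of this patch; the helper name distribute is hypothetical) of the send-count arithmetic in the producer loop below: numTxToSend is split evenly across all orderer/channel pairs, and any remainder is assigned to the first channel of the first orderer:

	package main

	import "fmt"

	// distribute splits numTxToSend the same way the loop below does:
	// an even share per (orderer, channel) pair, remainder added to [0][0].
	func distribute(numTxToSend int64, numOrds, numChans int) [][]int64 {
		counts := make([][]int64, numOrds)
		share := numTxToSend / int64(numOrds*numChans)
		for ord := range counts {
			counts[ord] = make([]int64, numChans)
			for c := range counts[ord] {
				counts[ord][c] = share
			}
		}
		counts[0][0] += numTxToSend % int64(numOrds*numChans)
		return counts
	}

	func main() {
		fmt.Println(distribute(55, 3, 2)) // [[10 9] [9 9] [9 9]]
	}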
+
+	if optimizeClientsMode {
+		producersWG.Add(numOrdsInNtwk)
+	} else {
+		producersWG.Add(numProducers)
+	}
+	sendStart := time.Now().Unix()
+	for ord := 0; ord < numOrdsInNtwk; ord++ {
+		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort+uint16(ord))
+		for c := 0; c < numChannels; c++ {
+			countToSend[ord][c] = numTxToSend / int64(numOrdsInNtwk*numChannels)
+			if c == 0 && ord == 0 {
+				countToSend[ord][c] += numTxToSend % int64(numOrdsInNtwk*numChannels)
+			}
+		}
+		if optimizeClientsMode {
+			// create one Producer for all channels on this orderer
+			go startProducerMaster(serverAddr, &channelIDs, ord, &(countToSend[ord]), &(txSent[ord]), &(txSentFailures[ord]))
+		} else {
+			// Normal mode: create a unique producer client
+			// go thread for each channel
+			for c := 0; c < numChannels; c++ {
+				go startProducer(serverAddr, channelIDs[c], ord, c, countToSend[ord][c], &(txSent[ord][c]), &(txSentFailures[ord][c]))
+			}
+		}
+	}
+
+	if optimizeClientsMode {
+		logger(fmt.Sprintf("Finished creating all %d MASTER-PRODUCERs", numOrdsInNtwk))
+	} else {
+		logger(fmt.Sprintf("Finished creating all %d PRODUCERs", numOrdsInNtwk*numChannels))
+	}
+	producersWG.Wait()
+	logger(fmt.Sprintf("Send Duration (seconds): %4d", time.Now().Unix()-sendStart))
+	recoverStart := time.Now().Unix()
+
+	////////////////////////////////////////////////////////////////////////
+	// All producer threads are finished sending broadcast transactions.
+	// Let's determine if the deliveries have all been received by the
+	// consumer threads. We will check if the receive counts match the send
+	// counts on all consumers, or if all consumers are no longer receiving
+	// blocks. Wait and continue rechecking as necessary, as long as the
+	// delivery (recv) counters are climbing closer to the broadcast (send)
+	// counter. If the counts do not match, wait for up to batchTimeout
+	// seconds, to ensure that we received the last (non-full) batch.
+
+	computeTotals(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch)
+
+	waitSecs := 0
+	for !sendEqualRecv(numTxToSend, &totalTxRecv, totalTxRecvMismatch, totalBlockRecvMismatch) && (moreDeliveries(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch) || waitSecs < batchTimeout) {
+		time.Sleep(1 * time.Second)
+		waitSecs++
+	}
+
+	// Recovery Duration = time spent waiting for orderer service to finish delivering transactions,
+	// after all producers finished sending them.
+	// waitSecs = some possibly idle time spent waiting for the last batch to be generated (waiting for batchTimeout)
+	logger(fmt.Sprintf("Recovery Duration (secs):%4d", time.Now().Unix()-recoverStart))
+	logger(fmt.Sprintf("waitSecs for last batch: %4d", waitSecs))
+	passed, resultSummary = reportTotals(testname, numTxToSend, countToSend, txSent, totalNumTxSent, txSentFailures, totalNumTxSentFailures, batchSize, txRecv, totalTxRecv, totalTxRecvMismatch, blockRecv, totalBlockRecv, totalBlockRecvMismatch, masterSpy, &channelIDs)
+
+	return passed, resultSummary
 }

 func main() {
-	initialize()
-
-	// Set reasonable defaults in case any env vars are unset.
-	var txs int64 = 55
-	chans := numChannels
-	orderers := numOrdsInNtwk
-	ordType := ordererType
-	kbs := numKBrokers
-
-
-	// Set addMasterSpy to true to create one additional consumer client
-	// that monitors all channels on one orderer with one grpc connection.
-	addMasterSpy := false
-
-	pPerCh := producersPerCh
-	// TODO lPerCh := listenersPerCh
-
-	// Read env vars
-	if debugflagAPI { logger("==========Environment variables provided for this test, and corresponding values actually used for the test:") }
-	testcmd := ""
-	envvar := os.Getenv("OTE_TXS")
-	if envvar != "" { txs, _ = strconv.ParseInt(envvar, 10, 64); testcmd += " OTE_TXS="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_TXS="+envvar, "txs", txs)) }
-
-	envvar = os.Getenv("OTE_CHANNELS")
-	if envvar != "" { chans, _ = strconv.Atoi(envvar); testcmd += " OTE_CHANNELS="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_CHANNELS="+envvar, "chans", chans)) }
-
-	envvar = os.Getenv("OTE_ORDERERS")
-	if envvar != "" { orderers, _ = strconv.Atoi(envvar); testcmd += " OTE_ORDERERS="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_ORDERERS="+envvar, "orderers", orderers)) }
-
-	envvar = os.Getenv(ordererTypeParamStr)
-	if envvar != "" { ordType = envvar; testcmd += " "+ordererTypeParamStr+"="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%s", ordererTypeParamStr+"="+envvar, "ordType", ordType)) }
-
-	envvar = os.Getenv("OTE_KAFKABROKERS")
-	if envvar != "" { kbs, _ = strconv.Atoi(envvar); testcmd += " OTE_KAFKABROKERS="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_KAFKABROKERS="+envvar, "kbs", kbs)) }
-
-	envvar = os.Getenv("OTE_MASTERSPY")
-	if "true" == strings.ToLower(envvar) || "t" == strings.ToLower(envvar) { addMasterSpy = true; testcmd += " OTE_MASTERSPY="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%t", "OTE_MASTERSPY="+envvar, "masterSpy", addMasterSpy)) }
-
-	envvar = os.Getenv("OTE_PRODUCERS_PER_CHANNEL")
-	if envvar != "" { pPerCh, _ = strconv.Atoi(envvar); testcmd += " OTE_PRODUCERS_PER_CHANNEL="+envvar }
-	if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_PRODUCERS_PER_CHANNEL="+envvar, "producersPerCh", pPerCh)) }
-
-	_, _ = ote( ""+testcmd+" ote", txs, chans, orderers, ordType, kbs, addMasterSpy, pPerCh)
+	initialize()
+
+	// Set reasonable defaults in case any env vars are unset.
+	var txs int64 = 55
+	chans := numChannels
+	orderers := numOrdsInNtwk
+	ordType := ordererType
+	kbs := numKBrokers
+
+	// Set addMasterSpy to true to create one additional consumer client
+	// that monitors all channels on one orderer with one grpc connection.
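The env-var reads that follow all apply one rule: use the variable when it is set, otherwise keep the default. A hedged sketch of that pattern (the helper intFromEnv is hypothetical and, unlike main() below, also guards against parse errors):

	package main

	import (
		"fmt"
		"os"
		"strconv"
	)

	// intFromEnv returns key's integer value when it is set and parses
	// cleanly, otherwise the supplied default.
	func intFromEnv(key string, def int) int {
		if v := os.Getenv(key); v != "" {
			if n, err := strconv.Atoi(v); err == nil {
				return n
			}
		}
		return def
	}

	func main() {
		os.Setenv("OTE_CHANNELS", "4")
		fmt.Println(intFromEnv("OTE_CHANNELS", 1)) // 4
		fmt.Println(intFromEnv("OTE_ORDERERS", 1)) // 1 (unset)
	}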
+	addMasterSpy := false
+
+	pPerCh := producersPerCh
+	// TODO lPerCh := listenersPerCh
+
+	// Read env vars
+	if debugflagAPI {
+		logger("==========Environment variables provided for this test, and corresponding values actually used for the test:")
+	}
+	testcmd := ""
+	envvar := os.Getenv("OTE_TXS")
+	if envvar != "" {
+		txs, _ = strconv.ParseInt(envvar, 10, 64)
+		testcmd += " OTE_TXS=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", "OTE_TXS="+envvar, "txs", txs))
+	}
+
+	envvar = os.Getenv("OTE_CHANNELS")
+	if envvar != "" {
+		chans, _ = strconv.Atoi(envvar)
+		testcmd += " OTE_CHANNELS=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", "OTE_CHANNELS="+envvar, "chans", chans))
+	}
+
+	envvar = os.Getenv("OTE_ORDERERS")
+	if envvar != "" {
+		orderers, _ = strconv.Atoi(envvar)
+		testcmd += " OTE_ORDERERS=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", "OTE_ORDERERS="+envvar, "orderers", orderers))
+	}
+
+	envvar = os.Getenv(ordererTypeParamStr)
+	if envvar != "" {
+		ordType = envvar
+		testcmd += " " + ordererTypeParamStr + "=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%s", ordererTypeParamStr+"="+envvar, "ordType", ordType))
+	}
+
+	envvar = os.Getenv("OTE_KAFKABROKERS")
+	if envvar != "" {
+		kbs, _ = strconv.Atoi(envvar)
+		testcmd += " OTE_KAFKABROKERS=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", "OTE_KAFKABROKERS="+envvar, "kbs", kbs))
+	}
+
+	envvar = os.Getenv("OTE_MASTERSPY")
+	if "true" == strings.ToLower(envvar) || "t" == strings.ToLower(envvar) {
+		addMasterSpy = true
+		testcmd += " OTE_MASTERSPY=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%t", "OTE_MASTERSPY="+envvar, "masterSpy", addMasterSpy))
+	}
+
+	envvar = os.Getenv("OTE_PRODUCERS_PER_CHANNEL")
+	if envvar != "" {
+		pPerCh, _ = strconv.Atoi(envvar)
+		testcmd += " OTE_PRODUCERS_PER_CHANNEL=" + envvar
+	}
+	if debugflagAPI {
+		logger(fmt.Sprintf("%-50s %s=%d", "OTE_PRODUCERS_PER_CHANNEL="+envvar, "producersPerCh", pPerCh))
+	}
+
+	_, _ = ote(""+testcmd+" ote", txs, chans, orderers, ordType, kbs, addMasterSpy, pPerCh)
 }
diff --git a/common/config/msp/config.go b/common/config/msp/config.go
index c6290458443..8ecb48e24d8 100644
--- a/common/config/msp/config.go
+++ b/common/config/msp/config.go
@@ -54,7 +54,7 @@ func (bh *MSPConfigHandler) BeginConfig(tx interface{}) {
 	defer bh.pendingLock.Unlock()
 	_, ok := bh.pendingConfig[tx]
 	if ok {
-		panic("Programming error, called BeginConfig mulitply for the same tx")
+		panic("Programming error, called BeginConfig multiple times for the same tx")
 	}
 	bh.pendingConfig[tx] = &mspConfigStore{
 		idMap: make(map[string]*pendingMSPConfig),
@@ -74,7 +74,7 @@ func (bh *MSPConfigHandler) CommitProposals(tx interface{}) {
 	defer bh.pendingLock.Unlock()
 	pendingConfig, ok := bh.pendingConfig[tx]
 	if !ok {
-		panic("Programming error, called BeginConfig mulitply for the same tx")
+		panic("Programming error, called BeginConfig multiple times for the same tx")
 	}

 	bh.MSPManager = pendingConfig.proposedMgr
@@ -87,7 +87,7 @@ func (bh *MSPConfigHandler) ProposeMSP(tx interface{}, mspConfig *mspprotos.MSPC
 	pendingConfig, ok := bh.pendingConfig[tx]
 	bh.pendingLock.RUnlock()
 	if !ok {
-		panic("Programming error, called BeginConfig mulitply for the same tx")
+		panic("Programming error, called BeginConfig multiple times for the same tx")
 	}

 	// check that the type for that MSP is supported
diff --git a/common/config/standardvalues.go b/common/config/standardvalues.go
index 69b4d82db82..97ed6dcdc82 100644
--- a/common/config/standardvalues.go
+++ b/common/config/standardvalues.go
@@ -48,7 +48,7 @@ func NewStandardValues(protosStructs ...interface{}) (*standardValues, error) {
 }

 // Deserialize looks up the backing Values proto of the given name, unmarshals the given bytes
-// to populate the backing message structure, and retuns a referenced to the retained deserialized
+// to populate the backing message structure, and returns a reference to the retained deserialized
 // message (or an error, either because the key did not exist, or there was an an error unmarshaling
 func (sv *standardValues) Deserialize(key string, value []byte) (proto.Message, error) {
 	msg, ok := sv.lookup[key]
@@ -95,7 +95,7 @@ func (sv *standardValues) initializeProtosStruct(objValue reflect.Value) error {

 		_, ok = sv.lookup[structField.Name]
 		if ok {
-			return fmt.Errorf("Ambiguous field name specified, multiple occurances of %s", structField.Name)
+			return fmt.Errorf("Ambiguous field name specified, multiple occurrences of %s", structField.Name)
 		}

 		sv.lookup[structField.Name] = proto
diff --git a/common/configtx/configmap.go b/common/configtx/configmap.go
index 398b5c37e63..dde82879b78 100644
--- a/common/configtx/configmap.go
+++ b/common/configtx/configmap.go
@@ -28,7 +28,7 @@ const (
 	GroupPrefix = "[Groups] "
 	ValuePrefix = "[Values] "
-	PolicyPrefix = "[Policy] " // The plurarility doesn't match, but, it makes the logs much easier being the same lenght as "Groups" and "Values"
+	PolicyPrefix = "[Policy] " // The plurality doesn't match, but it makes the logs much easier to read, being the same length as "Groups" and "Values"

 	PathSeparator = "/"
 )
diff --git a/common/configtx/template.go b/common/configtx/template.go
index 6dc681c6fc3..dfe9c224a6d 100644
--- a/common/configtx/template.go
+++ b/common/configtx/template.go
@@ -38,7 +38,7 @@ const (
 	epoch = 0
 )

-// Template can be used to faciliate creation of config transactions
+// Template can be used to facilitate creation of config transactions
 type Template interface {
 	// Envelope returns a ConfigUpdateEnvelope for the given chainID
 	Envelope(chainID string) (*cb.ConfigUpdateEnvelope, error)
diff --git a/common/configtx/update_test.go b/common/configtx/update_test.go
index cf3922fe051..26547a994ca 100644
--- a/common/configtx/update_test.go
+++ b/common/configtx/update_test.go
@@ -155,7 +155,7 @@ func TestPolicyForItem(t *testing.T) {
 			ModPolicy: "rootPolicy",
 		},
 	})
-	assert.False(t, ok, "Should not have found rootPolicy off a non-existant manager")
+	assert.False(t, ok, "Should not have found rootPolicy off a non-existent manager")

 	policy, ok = cm.policyForItem(comparable{
 		path: []string{"root", "foo"},
diff --git a/common/ledger/blkstorage/fsblkstorage/blockindex_test.go b/common/ledger/blkstorage/fsblkstorage/blockindex_test.go
index 16be6b00e36..92826ea35c1 100644
--- a/common/ledger/blkstorage/fsblkstorage/blockindex_test.go
+++ b/common/ledger/blkstorage/fsblkstorage/blockindex_test.go
@@ -86,7 +86,7 @@ func testBlockIndexSync(t *testing.T, numBlocks int, numBlocksToIndex int, syncB
 	// Plug-in back the original index
 	blkfileMgr.index = origIndex

-	// The first set of blocks should be present in the orginal index
+	// The first set of blocks should be present in the original index
 	for i := 0; i < numBlocksToIndex; i++ {
 		block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
 		testutil.AssertNoError(t, err, fmt.Sprintf("block [%d] should have been present in the index", i))
diff --git a/common/ledger/util/ioutil.go b/common/ledger/util/ioutil.go
index 15422967726..36ef32e90ab 100644
--- a/common/ledger/util/ioutil.go
+++ b/common/ledger/util/ioutil.go
@@ -89,7 +89,7 @@ func ListSubdirs(dirPath string) ([]string, error) {
 func logDirStatus(msg string, dirPath string) {
 	exists, _, err := FileExists(dirPath)
 	if err != nil {
-		logger.Errorf("Error while checking for dir existance")
+		logger.Errorf("Error while checking for dir existence")
 	}
 	if exists {
 		logger.Debugf("%s - [%s] exists", msg, dirPath)
diff --git a/common/ledger/util/ioutil_test.go b/common/ledger/util/ioutil_test.go
index aa5f934245d..167ff82c1ed 100644
--- a/common/ledger/util/ioutil_test.go
+++ b/common/ledger/util/ioutil_test.go
@@ -104,7 +104,7 @@ func TestListSubdirs(t *testing.T) {
 }

 func createAndWriteAFile(sentence string) (int, error) {
-	//create a file in the direcotry
+	//create a file in the directory
 	f, err2 := os.Create(dbFileTest)
 	if err2 != nil {
 		return 0, err2
diff --git a/common/policies/policy.go b/common/policies/policy.go
index e736ba969ca..e987b361587 100644
--- a/common/policies/policy.go
+++ b/common/policies/policy.go
@@ -230,7 +230,7 @@ func (pm *ManagerImpl) BeginPolicyProposals(tx interface{}, groups []string) ([]
 	defer pm.pendingLock.Unlock()
 	pendingConfig, ok := pm.pendingConfig[tx]
 	if ok {
-		logger.Panicf("Serious Programming error: cannot call begin mulitply for the same proposal")
+		logger.Panicf("Serious Programming error: cannot call begin multiple times for the same proposal")
 	}

 	pendingConfig = &policyConfig{
diff --git a/common/viperutil/config_test.go b/common/viperutil/config_test.go
index f22473be533..ffdfcc0a625 100644
--- a/common/viperutil/config_test.go
+++ b/common/viperutil/config_test.go
@@ -66,7 +66,7 @@ func TestEnvSlice(t *testing.T) {

 	expected := []string{"a", "b", "c"}
 	if !reflect.DeepEqual(uconf.Inner.Slice, expected) {
-		t.Fatalf("Did not get back the right slice, expeced: %v got %v", expected, uconf.Inner.Slice)
+		t.Fatalf("Did not get back the right slice, expected: %v got %v", expected, uconf.Inner.Slice)
 	}
 }

@@ -119,7 +119,7 @@ func TestByteSize(t *testing.T) {
 			t.Fatalf("Failed to unmarshal with: %s", err)
 		}
 		if uconf.Inner.ByteSize != tc.expected {
-			t.Fatalf("Did not get back the right byte size, expeced: %v got %v", tc.expected, uconf.Inner.ByteSize)
+			t.Fatalf("Did not get back the right byte size, expected: %v got %v", tc.expected, uconf.Inner.ByteSize)
 		}
 	})
 }
diff --git a/core/chaincode/handler.go b/core/chaincode/handler.go
index a8f3efb7a58..9069cde6714 100644
--- a/core/chaincode/handler.go
+++ b/core/chaincode/handler.go
@@ -80,7 +80,7 @@ type nextStateInfo struct {
 	sendSync bool
 }

-// Handler responsbile for management of Peer's side of chaincode stream
+// Handler responsible for management of Peer's side of chaincode stream
 type Handler struct {
 	sync.RWMutex
 	//peer to shim grpc serializer. User only in serialSend
diff --git a/core/committer/committer_impl.go b/core/committer/committer_impl.go
index 0713f5dc6e3..b2a111a80c0 100644
--- a/core/committer/committer_impl.go
+++ b/core/committer/committer_impl.go
@@ -37,7 +37,7 @@ func init() {
 }

 // LedgerCommitter is the implementation of Committer interface
-// it keeps the reference to the ledger to commit blocks and retreive
+// it keeps the reference to the ledger to commit blocks and retrieve
 // chain information
 type LedgerCommitter struct {
 	ledger ledger.PeerLedger
diff --git a/core/common/ccprovider/ccprovider.go b/core/common/ccprovider/ccprovider.go
index 472485f327f..c6ee8040455 100644
--- a/core/common/ccprovider/ccprovider.go
+++ b/core/common/ccprovider/ccprovider.go
@@ -299,7 +299,7 @@ type ChaincodeData struct {
 //Reset resets
 func (cd *ChaincodeData) Reset() { *cd = ChaincodeData{} }

-//String convers to string
+//String converts to string
 func (cd *ChaincodeData) String() string { return proto.CompactTextString(cd) }

 //ProtoMessage just exists to make proto happy
diff --git a/core/common/ccprovider/cdspackage.go b/core/common/ccprovider/cdspackage.go
index 84cb3948dd7..555ff4522cd 100644
--- a/core/common/ccprovider/cdspackage.go
+++ b/core/common/ccprovider/cdspackage.go
@@ -48,7 +48,7 @@ type CDSData struct {
 //Reset resets
 func (data *CDSData) Reset() { *data = CDSData{} }

-//String convers to string
+//String converts to string
 func (data *CDSData) String() string { return proto.CompactTextString(data) }

 //ProtoMessage just exists to make proto happy
diff --git a/core/common/ccprovider/sigcdspackage.go b/core/common/ccprovider/sigcdspackage.go
index 33b2e4792ae..c5260d3a27d 100644
--- a/core/common/ccprovider/sigcdspackage.go
+++ b/core/common/ccprovider/sigcdspackage.go
@@ -48,7 +48,7 @@ type SignedCDSData struct {
 //Reset resets
 func (data *SignedCDSData) Reset() { *data = SignedCDSData{} }

-//String convers to string
+//String converts to string
 func (data *SignedCDSData) String() string { return proto.CompactTextString(data) }

 //ProtoMessage just exists to make proto happy
diff --git a/core/endorser/endorser.go b/core/endorser/endorser.go
index ba13a6511e5..8e1ee732ecb 100644
--- a/core/endorser/endorser.go
+++ b/core/endorser/endorser.go
@@ -327,7 +327,7 @@ func (e *Endorser) ProcessProposal(ctx context.Context, signedProp *pb.SignedPro
 	// Check for uniqueness of prop.TxID with ledger
 	// Notice that ValidateProposalMessage has already verified
-	// that TxID is computed propertly
+	// that TxID is computed properly
 	txid := chdr.TxId
 	if txid == "" {
 		err = errors.New("Invalid txID. It must be different from the empty string.")
diff --git a/core/ledger/kvledger/kv_ledger_provider_test.go b/core/ledger/kvledger/kv_ledger_provider_test.go
index 7b1465a73bb..6a51a0ea89b 100644
--- a/core/ledger/kvledger/kv_ledger_provider_test.go
+++ b/core/ledger/kvledger/kv_ledger_provider_test.go
@@ -108,7 +108,7 @@ func TestRecovery(t *testing.T) {
 	testutil.AssertNoError(t, err, "Failed to open the ledger")
 	ledger.Close()

-	// Case 0: assume a crash happens before the genesis block of ledger 2 is comitted
+	// Case 0: assume a crash happens before the genesis block of ledger 2 is committed
 	// Open the ID store (inventory of chainIds/ledgerIds)
 	provider.(*Provider).idStore.setUnderConstructionFlag(constructTestLedgerID(2))
 	provider.Close()
@@ -203,7 +203,7 @@ func TestLedgerBackup(t *testing.T) {
 	// Create restore environment
 	env = createTestEnv(t, restorePath)

-	// remove the statedb, historydb, and block indexes (they are suppoed to be auto created during opening of an existing ledger)
+	// remove the statedb, historydb, and block indexes (they are supposed to be auto created during opening of an existing ledger)
 	// and rename the originalPath to restorePath
 	testutil.AssertNoError(t, os.RemoveAll(ledgerconfig.GetStateLevelDBPath()), "")
 	testutil.AssertNoError(t, os.RemoveAll(ledgerconfig.GetHistoryLevelDBPath()), "")
diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/query_wrapper.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/query_wrapper.go
index 8cea9aa11cd..357613d7970 100644
--- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/query_wrapper.go
+++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/query_wrapper.go
@@ -112,7 +112,7 @@ func ApplyQueryWrapper(namespace, queryString string, queryLimit, querySkip int)
 }

-//setNamespaceInSelector adds an additional heirarchy in the "selector"
+//setNamespaceInSelector adds an additional hierarchy in the "selector"
 //{"owner": {"$eq": "tom"}}
 //would be mapped as (assuming a namespace of "marble"):
 //{"$and":[{"chaincodeid":"marble"},{"data.owner":{"$eq":"tom"}}]}
@@ -129,7 +129,7 @@ func setNamespaceInSelector(namespace, jsonValue interface{},
 	//Add the context filter and the existing selector value
 	queryParts = append(queryParts, namespaceFilter, jsonValue)

-	//Create a new mapping for the new query stucture
+	//Create a new mapping for the new query structure
 	mappedSelector := make(map[string]interface{})

 	//Specify the "$and" operator for the parts of the query
diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go
index 73a15cd1f6d..e893309f146 100644
--- a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go
+++ b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go
@@ -159,7 +159,7 @@ func newResultsItr(ns string, startKey string, endKey string,
 // Before returning the next result, update the EndKey and ItrExhausted in rangeQueryInfo
 // If we set the EndKey in the constructor (as we do for the StartKey) to what is
 // supplied in the original query, we may be capturing the unnecessary longer range if the
-// caller decides to stop iterating at some intermidiate point. Alternatively, we could have
+// caller decides to stop iterating at some intermediate point. Alternatively, we could have
 // set the EndKey and ItrExhausted in the Close() function but it may not be desirable to change
 // transactional behaviour based on whether the Close() was invoked or not
 func (itr *resultsItr) Next() (commonledger.QueryResult, error) {
diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go
index 40edbd8baef..63a95e8f9f1 100644
--- a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go
+++ b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go
@@ -89,7 +89,7 @@ func (txmgr *LockBasedTxMgr) Commit() error {
 	logger.Debugf("Committing updates to state database")
 	txmgr.commitRWLock.Lock()
 	defer txmgr.commitRWLock.Unlock()
-	logger.Debugf("Write lock aquired for committing updates to state database")
+	logger.Debugf("Write lock acquired for committing updates to state database")
 	if txmgr.batch == nil {
 		panic("validateAndPrepare() method should have been called before calling commit()")
 	}
diff --git a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go
index 12f2f4f1f3e..6473b709ac0 100644
--- a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go
+++ b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go
@@ -139,7 +139,7 @@ func TestPhantomValidation(t *testing.T) {
 	rwsetBuilder2.AddToRangeQuerySet("ns1", rqi2)
 	checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder2.GetTxReadWriteSet()}, nil, []int{0})

-	//rwset3 should not be valid - simulate key3 got commited to db
+	//rwset3 should not be valid - simulate key3 got committed to db
 	rwsetBuilder3 := rwsetutil.NewRWSetBuilder()
 	rqi3 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false}
 	rqi3.SetRawReads([]*kvrwset.KVRead{
diff --git a/core/ledger/kvledger/txmgmt/version/version.go b/core/ledger/kvledger/txmgmt/version/version.go
index 81ac93b5b28..296193fab05 100644
--- a/core/ledger/kvledger/txmgmt/version/version.go
+++ b/core/ledger/kvledger/txmgmt/version/version.go
@@ -44,7 +44,7 @@ func (h *Height) ToBytes() []byte {
 }

 // Compare return a -1, zero, or +1 based on whether this height is
-// less than, equals to, or greater than the specified height repectively.
+// less than, equal to, or greater than the specified height, respectively.
 func (h *Height) Compare(h1 *Height) int {
 	res := 0
 	switch {
diff --git a/core/ledger/ledger_interface.go b/core/ledger/ledger_interface.go
index def2f4dd088..81d17cbe632 100644
--- a/core/ledger/ledger_interface.go
+++ b/core/ledger/ledger_interface.go
@@ -25,7 +25,7 @@ import (
 // PeerLedgerProvider provides handle to ledger instances
 type PeerLedgerProvider interface {
 	// Create creates a new ledger with the given genesis block.
-	// This function guarentees that the creation of ledger and committing the genesis block would an atomic action
+	// This function guarantees that the creation of ledger and committing the genesis block would be an atomic action
 	// The chain id retrieved from the genesis block is treated as a ledger id
 	Create(genesisBlock *common.Block) (PeerLedger, error)
 	// Open opens an already created ledger
@@ -120,7 +120,7 @@ type TxSimulator interface {
 	// GetTxSimulationResults encapsulates the results of the transaction simulation.
 	// This should contain enough detail for
 	// - The update in the state that would be caused if the transaction is to be committed
-	// - The environment in which the transaction is executed so as to be able to decide the validity of the enviroment
+	// - The environment in which the transaction is executed so as to be able to decide the validity of the environment
 	//   (at a later time on a different peer) during committing the transactions
 	// Different ledger implementation (or configurations of a single implementation) may want to represent the above two pieces
 	// of information in different way in order to support different data-models or optimize the information representations.
diff --git a/core/ledger/ledgermgmt/ledger_mgmt.go b/core/ledger/ledgermgmt/ledger_mgmt.go
index dccb0e99ca7..5ced1383a6f 100644
--- a/core/ledger/ledgermgmt/ledger_mgmt.go
+++ b/core/ledger/ledgermgmt/ledger_mgmt.go
@@ -65,7 +65,7 @@ func initialize() {
 }

 // CreateLedger creates a new ledger with the given genesis block.
-// This function guarentees that the creation of ledger and committing the genesis block would an atomic action
+// This function guarantees that the creation of ledger and committing the genesis block would be an atomic action
 // The chain id retrieved from the genesis block is treated as a ledger id
 func CreateLedger(genesisBlock *common.Block) (ledger.PeerLedger, error) {
 	lock.Lock()
diff --git a/core/peer/peer.go b/core/peer/peer.go
index 2862128d021..7aa45d9c9fe 100644
--- a/core/peer/peer.go
+++ b/core/peer/peer.go
@@ -370,7 +370,7 @@ func updateTrustedRoots(cm configtxapi.Manager) {
 }

 // populates the appRootCAs and orderRootCAs maps by getting the
-// root and intermediate certs for all msps assocaited with the MSPManager
+// root and intermediate certs for all msps associated with the MSPManager
 func buildTrustedRootsForChain(cm configtxapi.Manager) {
 	rootCASupport.Lock()
 	defer rootCASupport.Unlock()
diff --git a/core/scc/lscc/lscc.go b/core/scc/lscc/lscc.go
index 6e839368cb7..e111163b94e 100644
--- a/core/scc/lscc/lscc.go
+++ b/core/scc/lscc/lscc.go
@@ -82,7 +82,7 @@ const (
 //---------- the LSCC -----------------

-// LifeCycleSysCC implements chaincode lifecycle and policies aroud it
+// LifeCycleSysCC implements chaincode lifecycle and policies around it
 type LifeCycleSysCC struct {
 	// sccprovider is the interface with which we call
 	// methods of the system chaincode package without
@@ -579,7 +579,7 @@ func (lscc *LifeCycleSysCC) executeDeploy(stub shim.ChaincodeStubInterface, chai
 		return nil, fmt.Errorf("cannot get package for the chaincode to be instantiated (%s:%s)-%s", cds.ChaincodeSpec.ChaincodeId.Name, cds.ChaincodeSpec.ChaincodeId.Version, err)
 	}

-	//this is guranteed to be not nil
+	//this is guaranteed to be not nil
 	cd := ccpack.GetChaincodeData()

 	//retain chaincode specific data and fill channel specific ones
diff --git a/examples/ccchecker/init.go b/examples/ccchecker/init.go
index e0120c7e733..0730e726664 100644
--- a/examples/ccchecker/init.go
+++ b/examples/ccchecker/init.go
@@ -28,7 +28,7 @@ import (
 	"github.com/hyperledger/fabric/peer/common"
 )

-//This is where all initializations take place. These closley follow CLI
+//This is where all initializations take place. These closely follow CLI
 //initializations.

 //read CC checker configuration from -s . Defaults to ccchecker.json
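A hedged usage sketch for the ledgermgmt API documented in the hunk above (the wrapper newChainLedger is hypothetical; the CreateLedger signature is the one shown): because ledger creation and the genesis-block commit are one atomic action, a caller sees either a usable ledger or an error, never a half-created ledger:

	package example

	import (
		"github.com/hyperledger/fabric/core/ledger"
		"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
		"github.com/hyperledger/fabric/protos/common"
	)

	// newChainLedger creates the ledger whose ID is derived from the
	// genesis block, relying on CreateLedger's atomicity guarantee.
	func newChainLedger(genesisBlock *common.Block) (ledger.PeerLedger, error) {
		l, err := ledgermgmt.CreateLedger(genesisBlock)
		if err != nil {
			return nil, err // nothing was committed
		}
		return l, nil
	}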
Defaults to ccchecker.json diff --git a/examples/chaincode/go/chaincode_example04/chaincode_example04.go b/examples/chaincode/go/chaincode_example04/chaincode_example04.go index 8442ed8de45..81461c7eb79 100644 --- a/examples/chaincode/go/chaincode_example04/chaincode_example04.go +++ b/examples/chaincode/go/chaincode_example04/chaincode_example04.go @@ -31,7 +31,7 @@ import ( type SimpleChaincode struct { } -// Init takes two arguements, a string and int. These are stored in the key/value pair in the state +// Init takes two arguments, a string and int. These are stored in the key/value pair in the state func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { var event string // Indicates whether event has happened. Initially 0 var eventVal int // State of event diff --git a/examples/chaincode/go/map/map.go b/examples/chaincode/go/map/map.go index 20af5092c4e..daa7ae70a2e 100644 --- a/examples/chaincode/go/map/map.go +++ b/examples/chaincode/go/map/map.go @@ -43,7 +43,7 @@ func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { } // Invoke has two functions -// put - takes two arguements, a key and value, and stores them in the state +// put - takes two arguments, a key and value, and stores them in the state // remove - takes one argument, a key, and removes if from the state func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { function, args := stub.GetFunctionAndParameters() diff --git a/examples/chaincode/go/utxo/util/util.go b/examples/chaincode/go/utxo/util/util.go index 9e3a89a835a..d9094c376a2 100644 --- a/examples/chaincode/go/utxo/util/util.go +++ b/examples/chaincode/go/utxo/util/util.go @@ -54,7 +54,7 @@ func ReadVarInt(buffer *bytes.Buffer) uint64 { return finalResult } -// ParseUTXOBytes parses a bitcoin sytle transaction +// ParseUTXOBytes parses a bitcoin style transaction func ParseUTXOBytes(txAsUTXOBytes []byte) *TX { buffer := bytes.NewBuffer(txAsUTXOBytes) var version int32 diff --git a/examples/events/block-listener/block-listener.go b/examples/events/block-listener/block-listener.go index c642563398d..dffa168fad9 100644 --- a/examples/events/block-listener/block-listener.go +++ b/examples/events/block-listener/block-listener.go @@ -44,7 +44,7 @@ func (a *adapter) Recv(msg *pb.Event) (bool, error) { a.notfy <- o return true, nil } - return false, fmt.Errorf("Receive unkown type event: %v", msg) + return false, fmt.Errorf("Receive unknown type event: %v", msg) } //Disconnected implements consumer.EventAdapter interface for disconnecting diff --git a/gossip/gossip/batcher_test.go b/gossip/gossip/batcher_test.go index fd7aeb99d41..8040eda58f8 100644 --- a/gossip/gossip/batcher_test.go +++ b/gossip/gossip/batcher_test.go @@ -67,7 +67,7 @@ func TestBatchingEmitterExpiration(t *testing.T) { emitter.Add(1) time.Sleep(time.Duration(500) * time.Millisecond) - assert.Equal(t, int32(10), atomic.LoadInt32(&disseminationAttempts), "Inadaquate amount of dissemination attempts detected") + assert.Equal(t, int32(10), atomic.LoadInt32(&disseminationAttempts), "Inadequate amount of dissemination attempts detected") assert.Equal(t, 0, emitter.Size()) } diff --git a/gossip/gossip/channel/channel_test.go b/gossip/gossip/channel/channel_test.go index 670aeb876c0..1db35dc1d10 100644 --- a/gossip/gossip/channel/channel_test.go +++ b/gossip/gossip/channel/channel_test.go @@ -684,7 +684,7 @@ func TestChannelAddToMessageStoreExpire(t *testing.T) { if msg.IsDigestMsg() { assert.Equal(t, 1, len(msg.GetDataDig().Digests), 
"Number of digests returned by channel blockPuller incorrect") } else { - t.Fatal("Not correct pull msg type in responce - expect digest") + t.Fatal("Not correct pull msg type in response - expect digest") } } @@ -728,7 +728,7 @@ func TestChannelAddToMessageStoreExpire(t *testing.T) { if msg.IsDigestMsg() { assert.Equal(t, 1, len(msg.GetDataDig().Digests), "Number of digests returned by channel blockPuller incorrect") } else { - t.Fatal("Not correct pull msg type in responce - expect digest") + t.Fatal("Not correct pull msg type in response - expect digest") } } diff --git a/gotools/Makefile b/gotools/Makefile index 6d2bdb3b3c2..a307eb83544 100644 --- a/gotools/Makefile +++ b/gotools/Makefile @@ -18,7 +18,7 @@ OBJDIR ?= build TMP_GOPATH=$(OBJDIR)/gopath GOBIN=$(abspath $(TMP_GOPATH)/bin) -GOTOOLS = golint govendor goimports protoc-gen-go ginkgo gocov gocov-xml +GOTOOLS = golint govendor goimports protoc-gen-go ginkgo gocov gocov-xml misspell GOTOOLS_BIN = $(patsubst %,$(GOBIN)/%, $(GOTOOLS)) # go tool->path mapping @@ -28,6 +28,7 @@ go.fqp.goimports := golang.org/x/tools/cmd/goimports go.fqp.ginkgo := github.com/onsi/ginkgo/ginkgo go.fqp.gocov := github.com/axw/gocov/... go.fqp.gocov-xml := github.com/AlekSi/gocov-xml +go.fqp.misspell := github.com/client9/misspell/cmd/misspell all: $(GOTOOLS_BIN) diff --git a/msp/configbuilder.go b/msp/configbuilder.go index 389d04a00c8..853f4592b70 100644 --- a/msp/configbuilder.go +++ b/msp/configbuilder.go @@ -183,7 +183,7 @@ func getMspConfig(dir string, bccspConfig *factory.FactoryOpts, ID string, sigid intermediatecert, err := getPemMaterialFromDir(intermediatecertsDir) if os.IsNotExist(err) { - mspLogger.Infof("intermidiate certs folder not found at [%s]. Skipping.: [%s]", intermediatecertsDir, err) + mspLogger.Infof("intermediate certs folder not found at [%s]. Skipping.: [%s]", intermediatecertsDir, err) } else if err != nil { return nil, fmt.Errorf("Failed loading intermediate ca certs at [%s]: [%s]", intermediatecertsDir, err) } diff --git a/orderer/common/filter/filter.go b/orderer/common/filter/filter.go index 257815fda0a..266c80484bb 100644 --- a/orderer/common/filter/filter.go +++ b/orderer/common/filter/filter.go @@ -43,7 +43,7 @@ type Rule interface { // Committer is returned by postfiltering and should be invoked once the message has been written to the blockchain type Committer interface { - // Commit performs whatever action should be performed upon commiting of a message + // Commit performs whatever action should be performed upon committing of a message Commit() // Isolated returns whether this transaction should have a block to itself or may be mixed with other transactions diff --git a/orderer/kafka/consumer.go b/orderer/kafka/consumer.go index a367fc0fcb9..9c970ce3346 100644 --- a/orderer/kafka/consumer.go +++ b/orderer/kafka/consumer.go @@ -57,7 +57,7 @@ func (c *consumerImpl) Recv() <-chan *sarama.ConsumerMessage { return c.partition.Messages() } -// Errors returns a channel with errors occuring during +// Errors returns a channel with errors occurring during // the consumption of a partition from the Kafka cluster. 
func (c *consumerImpl) Errors() <-chan *sarama.ConsumerError { return c.partition.Errors() diff --git a/orderer/ledger/ram/impl.go b/orderer/ledger/ram/impl.go index 3141de17c5b..cac0c8a5486 100644 --- a/orderer/ledger/ram/impl.go +++ b/orderer/ledger/ram/impl.go @@ -89,7 +89,7 @@ func (rl *ramLedger) Iterator(startPosition *ab.SeekPosition) (ledger.Iterator, specified := start.Specified.Number logger.Debugf("Attempting to return block %d", specified) - // Note the two +1's here is to accomodate the 'preGenesis' block of ^uint64(0) + // Note the two +1's here are to accommodate the 'preGenesis' block of ^uint64(0) if specified+1 < oldest.block.Header.Number+1 || specified > rl.newest.block.Header.Number+1 { logger.Debugf("Returning error iterator because specified seek was %d with oldest %d and newest %d", specified, rl.oldest.block.Header.Number, rl.newest.block.Header.Number) diff --git a/orderer/ledger/util.go b/orderer/ledger/util.go index 01e7f7cd0ce..98f0f5aa796 100644 --- a/orderer/ledger/util.go +++ b/orderer/ledger/util.go @@ -46,7 +46,7 @@ func (nfei *NotFoundErrorIterator) ReadyChan() <-chan struct{} { // CreateNextBlock provides a utility way to construct the next block from // contents and metadata for a given ledger // XXX This will need to be modified to accept marshaled envelopes -// to accomodate non-deterministic marshaling +// to accommodate non-deterministic marshaling func CreateNextBlock(rl Reader, messages []*cb.Envelope) *cb.Block { var nextBlockNumber uint64 var previousBlockHash []byte diff --git a/orderer/multichain/systemchain.go b/orderer/multichain/systemchain.go index 192dbbdee5f..1e54b1dd2b6 100644 --- a/orderer/multichain/systemchain.go +++ b/orderer/multichain/systemchain.go @@ -89,7 +89,7 @@ func (scf *systemChainFilter) Apply(env *cb.Envelope) (filter.Action, filter.Com maxChannels := scf.support.SharedConfig().MaxChannelsCount() if maxChannels > 0 { - // We check for strictly greater than to accomodate the system channel + // We check for strictly greater than to accommodate the system channel if uint64(scf.cc.channelsCount()) > maxChannels { logger.Warningf("Rejecting channel creation because the orderer has reached the maximum number of channels, %d", maxChannels) return filter.Reject, nil diff --git a/orderer/sample_clients/broadcast_timestamp/client.go b/orderer/sample_clients/broadcast_timestamp/client.go index 0e17c63b150..5cbed390dbb 100644 --- a/orderer/sample_clients/broadcast_timestamp/client.go +++ b/orderer/sample_clients/broadcast_timestamp/client.go @@ -77,7 +77,7 @@ func main() { flag.StringVar(&serverAddr, "server", fmt.Sprintf("%s:%d", config.General.ListenAddress, config.General.ListenPort), "The RPC server to connect to.") flag.StringVar(&chainID, "chainID", provisional.TestChainID, "The chain ID to broadcast to.") - flag.Uint64Var(&messages, "messages", 1, "The number of messages to braodcast.") + flag.Uint64Var(&messages, "messages", 1, "The number of messages to broadcast.") flag.Parse() conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) diff --git a/orderer/sbft/simplebft/commit.go b/orderer/sbft/simplebft/commit.go index 957fd95cdf9..b2ba6383ad6 100644 --- a/orderer/sbft/simplebft/commit.go +++ b/orderer/sbft/simplebft/commit.go @@ -51,7 +51,7 @@ func (s *SBFT) handleCommit(c *Subject, src uint64) { s.cur.commit[src] = c s.cancelViewChangeTimer() - //maybe mark as comitted + //maybe mark as committed if s.cur.committed || len(s.cur.commit) < s.commonCaseQuorum() { return } diff --git a/orderer/sbft/simplebft/simplebft_test.go
b/orderer/sbft/simplebft/simplebft_test.go index 3b4b757b0b9..cc3e75e3bc0 100644 --- a/orderer/sbft/simplebft/simplebft_test.go +++ b/orderer/sbft/simplebft/simplebft_test.go @@ -534,7 +534,7 @@ func TestMsgReordering(t *testing.T) { var preprep *testMsgEvent // forcing pre-prepare from primary 0 to reach replica 1 after some delay - // effectivelly delivering pre-prepare instead of checkpoint + // effectively delivering pre-prepare instead of checkpoint sys.filterFn = func(e testElem) (testElem, bool) { if msg, ok := e.ev.(*testMsgEvent); ok { if msg.src == 0 && msg.dst == 1 { @@ -587,7 +587,7 @@ func TestBacklogReordering(t *testing.T) { var preprep *testMsgEvent // forcing pre-prepare from primary 0 to reach replica 1 after some delay - // effectivelly delivering pre-prepare instead of checkpoint + // effectively delivering pre-prepare instead of checkpoint sys.filterFn = func(e testElem) (testElem, bool) { if msg, ok := e.ev.(*testMsgEvent); ok { if msg.src == 0 && msg.dst == 1 { diff --git a/peer/chaincode/install_test.go b/peer/chaincode/install_test.go index 27bd5b1956c..95849ad9715 100644 --- a/peer/chaincode/install_test.go +++ b/peer/chaincode/install_test.go @@ -33,7 +33,7 @@ func initInstallTest(fsPath string, t *testing.T) (*cobra.Command, *ChaincodeCmd viper.Set("peer.fileSystemPath", fsPath) finitInstallTest(fsPath) - //if mkdir fails everthing will fail... but it should not + //if mkdir fails everything will fail... but it should not if err := os.Mkdir(fsPath, 0755); err != nil { t.Fatalf("could not create install env") } diff --git a/peer/chaincode/package.go b/peer/chaincode/package.go index 5b6423e98d7..b259322f5e2 100644 --- a/peer/chaincode/package.go +++ b/peer/chaincode/package.go @@ -138,7 +138,7 @@ func getChaincodeInstallPackage(cds *pb.ChaincodeDeploymentSpec, cf *ChaincodeCm return bytesToWrite, nil } -// chaincodePackage creates the chaincode packge. On success, the chaincode name +// chaincodePackage creates the chaincode package. On success, the chaincode name // (hash) is printed to STDOUT for use by subsequent chaincode-related CLI // commands. func chaincodePackage(cmd *cobra.Command, args []string, cdsFact ccDepSpecFactory, cf *ChaincodeCmdFactory) error { diff --git a/peer/node/start.go b/peer/node/start.go index 422e9d70c79..930a01e5f4c 100644 --- a/peer/node/start.go +++ b/peer/node/start.go @@ -59,7 +59,7 @@ var orderingEndpoint string // XXXDefaultChannelMSPID should not be defined in production code // It should only be referenced in tests. However, it is necessary -// to support the 'default chain' setup so temporarilly adding until +// to support the 'default chain' setup so temporarily adding until // this concept can be removed to testing scenarios only const XXXDefaultChannelMSPID = "DEFAULT" @@ -93,7 +93,7 @@ func initSysCCs() { func serve(args []string) error { ledgermgmt.Initialize() - // Parameter overrides must be processed before any paramaters are + // Parameter overrides must be processed before any parameters are // cached. Failures to cache cause the server to terminate immediately. if chaincodeDevMode { logger.Info("Running in chaincode development mode") diff --git a/scripts/check_spelling.sh b/scripts/check_spelling.sh new file mode 100755 index 00000000000..c6542815ad0 --- /dev/null +++ b/scripts/check_spelling.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo "Checking Go files for spelling errors ..." +errs=`find . 
-name "*.go" | grep -v vendor/ | grep -v build/ | grep -v ".pb.go" | xargs misspell` +if [ -z "$errs" ]; then + echo "spell checker passed" + exit 0 +fi +echo "The following files are have spelling errors:" +echo "$errs" +exit 1