diff --git a/common/ledger/blkstorage/fsblkstorage/rollback.go b/common/ledger/blkstorage/fsblkstorage/rollback.go
index 9397c5b3a23..cf32bf17051 100644
--- a/common/ledger/blkstorage/fsblkstorage/rollback.go
+++ b/common/ledger/blkstorage/fsblkstorage/rollback.go
@@ -109,7 +109,7 @@ func (r *rollbackMgr) rollbackBlockIndex() error {
 }
 
 func (r *rollbackMgr) deleteIndexEntriesRange(startBlkNum, endBlkNum uint64) error {
-	// TODO: when more than half of the blocks' indicies are to be deleted, it
+	// TODO: when more than half of the blocks' indices are to be deleted, it
 	// might be efficient to drop the whole index database rather than deleting
 	// entries. However, if there is more than more than 1 channel, dropping of
 	// index would impact the time taken to recover the peer. We need to analyze
diff --git a/core/common/ccprovider/ccprovider.go b/core/common/ccprovider/ccprovider.go
index eabe1d1f26a..b4565e3e849 100644
--- a/core/common/ccprovider/ccprovider.go
+++ b/core/common/ccprovider/ccprovider.go
@@ -356,7 +356,7 @@ func GetInstalledChaincodes() (*pb.ChaincodeQueryResponse, error) {
 
 // ChaincodeData defines the datastructure for chaincodes to be serialized by proto
 // Type provides an additional check by directing to use a specific package after instantiation
-// Data is Type specifc (see CDSPackage and SignedCDSPackage)
+// Data is Type specific (see CDSPackage and SignedCDSPackage)
 type ChaincodeData struct {
 	// Name of the chaincode
 	Name string `protobuf:"bytes,1,opt,name=name"`
diff --git a/core/common/privdata/collection.go b/core/common/privdata/collection.go
index b300e362d53..223d0b57f82 100644
--- a/core/common/privdata/collection.go
+++ b/core/common/privdata/collection.go
@@ -60,7 +60,7 @@ type CollectionAccessPolicy interface {
 	IsMemberOnlyWrite() bool
 }
 
-// CollectionPersistenceConfigs encapsulates configurations related to persistece of a collection
+// CollectionPersistenceConfigs encapsulates configurations related to persistence of a collection
 type CollectionPersistenceConfigs interface {
 	// BlockToLive returns the number of blocks after which the collection data expires.
 	// For instance if the value is set to 10, a key last modified by block number 100
@@ -120,13 +120,13 @@ type CollectionFilter interface {
 }
 
 const (
-	// Collecion-specific constants
+	// Collection-specific constants
 
 	// CollectionSeparator is the separator used to build the KVS
 	// key storing the collections of a chaincode; note that we are
 	// using as separator a character which is illegal for either the
 	// name or the version of a chaincode so there cannot be any
-	// collisions when chosing the name
+	// collisions when choosing the name
 	collectionSeparator = "~"
 	// collectionSuffix is the suffix of the KVS key storing the
 	// collections of a chaincode
diff --git a/core/common/validation/statebased/vpmanager.go b/core/common/validation/statebased/vpmanager.go
index 8e4084cf4f4..a21850be5cd 100644
--- a/core/common/validation/statebased/vpmanager.go
+++ b/core/common/validation/statebased/vpmanager.go
@@ -39,7 +39,7 @@ type KeyLevelValidationParameterManager interface {
 	// height h. The function returns the validation parameter and no error in case of
 	// success, or nil and an error otherwise. One particular error that may be
 	// returned is ValidationParameterUpdatedErr, which is returned in case the
-	// validation parmeters for the given KVS key have been changed by a transaction
+	// validation parameters for the given KVS key have been changed by a transaction
 	// with txNum smaller than the one supplied by the caller. This protects from a
 	// scenario where a transaction changing validation parameters is marked as valid
 	// by VSCC and is later invalidated by the committer for other reasons (e.g. MVCC
diff --git a/core/common/validation/statebased/vpmanagerimpl.go b/core/common/validation/statebased/vpmanagerimpl.go
index f07facf5e45..f7a932a9c38 100644
--- a/core/common/validation/statebased/vpmanagerimpl.go
+++ b/core/common/validation/statebased/vpmanagerimpl.go
@@ -209,7 +209,7 @@ func (c *validationContext) waitForValidationResults(kid *ledgerKeyID, blockNum
 	// that affect us and put them in a local slice; we then release
 	// the mutex
 	// 2) we traverse the slice of dependencies and for each, retrieve
-	// the validartion result
+	// the validation result
 	// The two step approach is required to avoid a deadlock where the
 	// consumer (the caller of this function) holds the mutex and thus
 	// prevents the producer (the caller of signalValidationResult) to
diff --git a/core/endorser/plugin_endorser.go b/core/endorser/plugin_endorser.go
index 58979606d26..8627b827ef8 100644
--- a/core/endorser/plugin_endorser.go
+++ b/core/endorser/plugin_endorser.go
@@ -29,7 +29,7 @@ type TransientStoreRetriever interface {
 
 // ChannelStateRetriever retrieves Channel state
 type ChannelStateRetriever interface {
-	// ChannelState returns a QueryCreator for the given Channel
+	// NewQueryCreator returns a QueryCreator for the given Channel
 	NewQueryCreator(channel string) (QueryCreator, error)
 }
 
diff --git a/core/handlers/library/registry.go b/core/handlers/library/registry.go
index f725fcf625e..cab5d629bd2 100644
--- a/core/handlers/library/registry.go
+++ b/core/handlers/library/registry.go
@@ -126,7 +126,7 @@ func (r *registry) loadCompiled(handlerFactory string, handlerType HandlerType,
 	}
 }
 
-// loadPlugin loads a pluggagle handler
+// loadPlugin loads a pluggable handler
 func (r *registry) loadPlugin(pluginPath string, handlerType HandlerType, extraArgs ...string) {
 	if _, err := os.Stat(pluginPath); err != nil {
 		logger.Panicf(fmt.Sprintf("Could not find plugin at path %s: %s", pluginPath, err))
@@ -234,7 +234,7 @@ func panicWithDefinitionError(factory string) {
 }
 
 // Lookup returns a list of handlers with the given
-// given type, or nil if none exist
+// type, or nil if none exist
 func (r *registry) Lookup(handlerType HandlerType) interface{} {
 	if handlerType == Auth {
 		return r.filters
diff --git a/core/ledger/cceventmgmt/mgr.go b/core/ledger/cceventmgmt/mgr.go
index aa4c84814e7..1bf77798315 100644
--- a/core/ledger/cceventmgmt/mgr.go
+++ b/core/ledger/cceventmgmt/mgr.go
@@ -93,7 +93,7 @@ func (m *Mgr) HandleChaincodeDeploy(chainid string, chaincodeDefinitions []*Chai
 
 // ChaincodeDeployDone is expected to be called when the deploy transaction state is committed
 func (m *Mgr) ChaincodeDeployDone(chainid string) {
-	// release the lock aquired in function `HandleChaincodeDeploy`
+	// release the lock acquired in function `HandleChaincodeDeploy`
 	defer m.rwlock.RUnlock()
 	if m.callbackStatus.isDeployPending(chainid) {
 		m.invokeDoneOnHandlers(chainid, true)
diff --git a/core/ledger/confighistory/mgr.go b/core/ledger/confighistory/mgr.go
index f6282b52881..128ea5acdcd 100644
--- a/core/ledger/confighistory/mgr.go
+++ b/core/ledger/confighistory/mgr.go
@@ -23,7 +23,7 @@ const (
 	collectionConfigNamespace = "lscc" // lscc namespace was introduced in version 1.2 and we continue to use this in order to be compatible with existing data
 )
 
-// Mgr should be registered as a state listener. The state listener builds the history and retriver helps in querying the history
+// Mgr should be registered as a state listener. The state listener builds the history and retriever helps in querying the history
 type Mgr interface {
 	ledger.StateListener
 	GetRetriever(ledgerID string, ledgerInfoRetriever LedgerInfoRetriever) ledger.ConfigHistoryRetriever
diff --git a/core/ledger/kvledger/bookkeeping/provider.go b/core/ledger/kvledger/bookkeeping/provider.go
index 556250f25ea..c00e4afcada 100644
--- a/core/ledger/kvledger/bookkeeping/provider.go
+++ b/core/ledger/kvledger/bookkeeping/provider.go
@@ -16,7 +16,7 @@ import (
 type Category int
 
 const (
-	// PvtdataExpiry repersents the bookkeeping related to expiry of pvtdata because of BTL policy
+	// PvtdataExpiry represents the bookkeeping related to expiry of pvtdata because of BTL policy
 	PvtdataExpiry Category = iota
 	// MetadataPresenceIndicator maintains the bookkeeping about whether metadata is ever set for a namespace
 	MetadataPresenceIndicator
diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/common_storage_db.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/common_storage_db.go
index 79bc87c55d7..0ed77a9ef79 100644
--- a/core/ledger/kvledger/txmgmt/privacyenabledstate/common_storage_db.go
+++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/common_storage_db.go
@@ -248,7 +248,7 @@ func (s *CommonStorageDB) ApplyPrivacyAwareUpdates(updates *UpdateBatch, height
 // an optimization such that it keeps track if a namespaces has never stored metadata for any of
 // its items, the value 'nil' is returned without going to the db. This is intended to be invoked
 // in the validation and commit path. This saves the chaincodes from paying unnecessary performance
-// penality if they do not use features that leverage metadata (such as key-level endorsement),
+// penalty if they do not use features that leverage metadata (such as key-level endorsement),
 func (s *CommonStorageDB) GetStateMetadata(namespace, key string) ([]byte, error) {
 	if !s.metadataHint.metadataEverUsedFor(namespace) {
 		return nil, nil
@@ -261,7 +261,7 @@ func (s *CommonStorageDB) GetStateMetadata(namespace, key string) ([]byte, error
 }
 
 // GetPrivateDataMetadataByHash implements corresponding function in interface DB. For additional details, see
-// decription of the similar function 'GetStateMetadata'
+// description of the similar function 'GetStateMetadata'
 func (s *CommonStorageDB) GetPrivateDataMetadataByHash(namespace, collection string, keyHash []byte) ([]byte, error) {
 	if !s.metadataHint.metadataEverUsedFor(namespace) {
 		return nil, nil
@@ -274,7 +274,7 @@ func (s *CommonStorageDB) GetPrivateDataMetadataByHash(namespace, collection str
 }
 
 // HandleChaincodeDeploy initializes database artifacts for the database associated with the namespace
-// This function delibrately suppresses the errors that occur during the creation of the indexes on couchdb.
+// This function deliberately suppresses the errors that occur during the creation of the indexes on couchdb.
 // This is because, in the present code, we do not differentiate between the errors because of couchdb interaction
 // and the errors because of bad index files - the later being unfixable by the admin. Note that the error suppression
 // is acceptable since peer can continue in the committing role without the indexes. However, executing chaincode queries
diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/update_batch_bytes.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/update_batch_bytes.go
index 126d4ba2c16..09943edc68b 100644
--- a/core/ledger/kvledger/txmgmt/privacyenabledstate/update_batch_bytes.go
+++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/update_batch_bytes.go
@@ -19,13 +19,13 @@ type UpdatesBytesBuilder struct {
 
 // DeterministicBytesForPubAndHashUpdates constructs the bytes for a given UpdateBatch
 // while constructing the bytes, it considers only public writes and hashed writes for
-// the collections. For achieveing the determinism, it constructs a slice of proto messages
+// the collections. For achieving the determinism, it constructs a slice of proto messages
 // of type 'KVWriteProto'. In the slice, all the writes for a namespace "ns1" appear before
 // the writes for another namespace "ns2" if "ns1" < "ns2" (lexicographically). Within a
 // namespace, all the public writes appear before the collection writes. Like namespaces,
 // the collections writes within a namespace appear in the order of lexicographical order.
-// If an entry has the same namespace as its preceding entry, the namespcae field is skipped.
-// A Similar treatment is given to the repeative entries for a collection within a namespace.
+// If an entry has the same namespace as its preceding entry, the namespace field is skipped.
+// A Similar treatment is given to the repetitive entries for a collection within a namespace.
 // For illustration, see the corresponding unit tests
 func (bb *UpdatesBytesBuilder) DeterministicBytesForPubAndHashUpdates(u *UpdateBatch) ([]byte, error) {
 	pubUpdates := u.PubUpdates
diff --git a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
index ce1da1d51e7..7493aebe0d9 100644
--- a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
+++ b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
@@ -22,7 +22,7 @@ import (
 type PurgeMgr interface {
 	// PrepareForExpiringKeys gives a chance to the PurgeMgr to do background work in advance if any
 	PrepareForExpiringKeys(expiringAtBlk uint64)
-	// WaitForPrepareToFinish holds the caller till the background goroutine lauched by 'PrepareForExpiringKeys' is finished
+	// WaitForPrepareToFinish holds the caller till the background goroutine launched by 'PrepareForExpiringKeys' is finished
 	WaitForPrepareToFinish()
 	// DeleteExpiredAndUpdateBookkeeping updates the bookkeeping and modifies the update batch by adding the deletes for the expired pvtdata
 	DeleteExpiredAndUpdateBookkeeping(
@@ -197,8 +197,8 @@ func (p *purgeMgr) DeleteExpiredAndUpdateBookkeeping(
 
 // BlockCommitDone implements function in the interface 'PurgeMgr'
 // These orphan entries for purge-schedule can be cleared off in bulk in a separate background routine as well
-// If we maintian the following logic (i.e., clear off entries just after block commit), we need a TODO -
-// We need to perform a check in the start, becasue there could be a crash between the block commit and
+// If we maintain the following logic (i.e., clear off entries just after block commit), we need a TODO -
+// We need to perform a check in the start, because there could be a crash between the block commit and
 // invocation to this function resulting in the orphan entry for the deletes scheduled for the last block
 // Also, the another way is to club the delete of these entries in the same batch that adds entries for the future expirations -
 // however, that requires updating the expiry store by replaying the last block from blockchain in order to sustain a crash between
@@ -229,7 +229,7 @@ func (p *purgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
 		logger.Debugf("No expiry entry found for expiringAtBlk [%d]", expiringAtBlk)
 		return workingset
 	}
-	logger.Debugf("Total [%d] expiring entries found. Evaluaitng whether some of these keys have been overwritten in later blocks...", len(toPurge))
+	logger.Debugf("Total [%d] expiring entries found. Evaluating whether some of these keys have been overwritten in later blocks...", len(toPurge))
 
 	for purgeEntryK, purgeEntryV := range toPurge {
 		logger.Debugf("Evaluating for hashedKey [%s]", purgeEntryK)
diff --git a/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go b/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
index 6b381d3f394..56fbf8e35e0 100644
--- a/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
+++ b/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
@@ -56,7 +56,7 @@ func (combiner *itrCombiner) Next() (commonledger.QueryResult, error) {
 		if err != nil {
 			return nil, err
 		}
-		if removed { // if the current iterator is exhaused and hence removed, decrement the index
+		if removed { // if the current iterator is exhausted and hence removed, decrement the index
			// because indexes of the remaining iterators are decremented by one
 			i--
 		}
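
The comment fixed in vpmanagerimpl.go describes a two-step approach: while holding the mutex, copy the dependencies of interest into a local slice, release the mutex, and only then block waiting for each result, so that the producer calling signalValidationResult can still acquire the lock. Below is a minimal, generic Go sketch of that pattern; the names store, publish and waitAll are hypothetical stand-ins, not the Fabric types.

package main

import (
	"fmt"
	"sync"
)

// store maps a key to a buffered channel that carries the result for that key.
// The producer needs the mutex to publish results, so consumers must not hold
// the mutex while they wait.
type store struct {
	mu      sync.Mutex
	pending map[string]chan string
}

func newStore() *store {
	return &store{pending: make(map[string]chan string)}
}

// resultOf returns (creating if necessary) the channel for key. Callers must hold s.mu.
func (s *store) resultOf(key string) chan string {
	ch, ok := s.pending[key]
	if !ok {
		ch = make(chan string, 1)
		s.pending[key] = ch
	}
	return ch
}

// publish is the producer side: it takes the mutex briefly to deliver a result.
func (s *store) publish(key, value string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.resultOf(key) <- value
}

// waitAll is the consumer side and shows the two-step approach:
// 1) under the mutex, collect the channels we depend on into a local slice;
// 2) after releasing the mutex, block on each channel.
// Waiting while still holding the mutex would deadlock, because publish()
// could never acquire it to deliver the results being waited for.
func (s *store) waitAll(keys []string) []string {
	s.mu.Lock()
	deps := make([]chan string, 0, len(keys))
	for _, k := range keys {
		deps = append(deps, s.resultOf(k))
	}
	s.mu.Unlock() // step 1 done: release before any blocking wait

	results := make([]string, 0, len(deps))
	for _, ch := range deps { // step 2: safe to block now
		results = append(results, <-ch)
	}
	return results
}

func main() {
	s := newStore()
	go s.publish("k1", "v1")
	go s.publish("k2", "v2")
	fmt.Println(s.waitAll([]string{"k1", "k2"})) // [v1 v2]
}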
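
The DeterministicBytesForPubAndHashUpdates comment relies on the usual way of getting stable bytes out of Go maps: gather the keys, sort them lexicographically, and emit entries in that order, leaving out a namespace field that merely repeats the previous entry's. A minimal sketch of that idea under simplified assumptions — the updates map shape and the "ns|key|value" encoding are illustrative, not the KVWriteProto messages used by Fabric.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// updates maps namespace -> key -> value, loosely mirroring a public update
// batch. Go map iteration order is random, so deterministic bytes require
// sorting the keys at every level.
type updates map[string]map[string]string

// deterministicBytes emits "ns|key|value" records ordered lexicographically by
// namespace and then by key. If a record has the same namespace as the one
// before it, the namespace field is left empty, mimicking the "skip repeated
// namespace" treatment described in the comment.
func deterministicBytes(u updates) []byte {
	var buf bytes.Buffer
	namespaces := make([]string, 0, len(u))
	for ns := range u {
		namespaces = append(namespaces, ns)
	}
	sort.Strings(namespaces)

	prevNs := ""
	for _, ns := range namespaces {
		keys := make([]string, 0, len(u[ns]))
		for k := range u[ns] {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			nsField := ns
			if ns == prevNs {
				nsField = "" // namespace identical to the preceding entry: skip it
			}
			fmt.Fprintf(&buf, "%s|%s|%s\n", nsField, k, u[ns][k])
			prevNs = ns
		}
	}
	return buf.Bytes()
}

func main() {
	u := updates{
		"ns2": {"k1": "v3"},
		"ns1": {"kb": "v2", "ka": "v1"},
	}
	fmt.Printf("%s", deterministicBytes(u))
	// ns1|ka|v1
	// |kb|v2
	// ns2|k1|v3
}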
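
The comment corrected in iterator_combiner.go points at an easy-to-miss detail: when the exhausted iterator is removed from the slice being scanned, the loop index must be decremented, otherwise the iterator that slides into the vacated slot gets skipped. A generic sketch of that idiom follows; the itr type and drainHeads helper are made up for illustration and are not the Fabric queryutil types.

package main

import "fmt"

// itr is a minimal iterator abstraction: Next returns the next value and
// ok=false once the iterator is exhausted.
type itr struct {
	vals []int
	pos  int
}

func (it *itr) Next() (int, bool) {
	if it.pos >= len(it.vals) {
		return 0, false
	}
	v := it.vals[it.pos]
	it.pos++
	return v, true
}

// drainHeads takes one value from each iterator in the slice. An iterator that
// turns out to be exhausted is removed in place; because removal shifts the
// remaining iterators left by one, the index is decremented so the iterator
// that moved into slot i is not skipped on the next pass through the loop.
func drainHeads(itrs []*itr) ([]int, []*itr) {
	var heads []int
	for i := 0; i < len(itrs); i++ {
		v, ok := itrs[i].Next()
		if !ok {
			// remove the exhausted iterator at index i
			itrs = append(itrs[:i], itrs[i+1:]...)
			i-- // compensate for the left shift of the remaining iterators
			continue
		}
		heads = append(heads, v)
	}
	return heads, itrs
}

func main() {
	itrs := []*itr{
		{vals: []int{1, 2}},
		{vals: nil}, // already exhausted; will be removed
		{vals: []int{3}},
	}
	heads, remaining := drainHeads(itrs)
	fmt.Println(heads, len(remaining)) // [1 3] 2
}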