[FAB-16630] Fix comment error
This fixes #FAB-16630.

Signed-off-by: bjzhang03 <bjzhang03@foxmail.com>
Change-Id: Ie94e8cc8969d6d8c29904ebab3331be870ee1751
Signed-off-by: bjzhang03 <bjzhang1991@gmail.com>
bjzhang03 committed Oct 22, 2019
1 parent 4b786b4 commit 51387dd
Showing 14 changed files with 24 additions and 24 deletions.
2 changes: 1 addition & 1 deletion common/ledger/blkstorage/fsblkstorage/rollback.go
@@ -109,7 +109,7 @@ func (r *rollbackMgr) rollbackBlockIndex() error {
}

func (r *rollbackMgr) deleteIndexEntriesRange(startBlkNum, endBlkNum uint64) error {
- // TODO: when more than half of the blocks' indicies are to be deleted, it
+ // TODO: when more than half of the blocks' indices are to be deleted, it
// might be efficient to drop the whole index database rather than deleting
// entries. However, if there is more than more than 1 channel, dropping of
// index would impact the time taken to recover the peer. We need to analyze
2 changes: 1 addition & 1 deletion core/common/ccprovider/ccprovider.go
@@ -356,7 +356,7 @@ func GetInstalledChaincodes() (*pb.ChaincodeQueryResponse, error) {

// ChaincodeData defines the datastructure for chaincodes to be serialized by proto
// Type provides an additional check by directing to use a specific package after instantiation
- // Data is Type specifc (see CDSPackage and SignedCDSPackage)
+ // Data is Type specific (see CDSPackage and SignedCDSPackage)
type ChaincodeData struct {
// Name of the chaincode
Name string `protobuf:"bytes,1,opt,name=name"`
6 changes: 3 additions & 3 deletions core/common/privdata/collection.go
@@ -60,7 +60,7 @@ type CollectionAccessPolicy interface {
IsMemberOnlyWrite() bool
}

- // CollectionPersistenceConfigs encapsulates configurations related to persistece of a collection
+ // CollectionPersistenceConfigs encapsulates configurations related to persistence of a collection
type CollectionPersistenceConfigs interface {
// BlockToLive returns the number of blocks after which the collection data expires.
// For instance if the value is set to 10, a key last modified by block number 100
@@ -120,13 +120,13 @@ type CollectionFilter interface {
}

const (
- // Collecion-specific constants
+ // Collection-specific constants

// CollectionSeparator is the separator used to build the KVS
// key storing the collections of a chaincode; note that we are
// using as separator a character which is illegal for either the
// name or the version of a chaincode so there cannot be any
- // collisions when chosing the name
+ // collisions when choosing the name
collectionSeparator = "~"
// collectionSuffix is the suffix of the KVS key storing the
// collections of a chaincode
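
A minimal sketch of how the separator-based key described above could be assembled; the buildCollectionConfigKey helper and the "collections" suffix literal are illustrative assumptions, not the package's actual API:

// buildCollectionConfigKey is a hypothetical helper: because "~" is illegal in
// chaincode names and versions, a key such as "mycc~collections" cannot collide
// with any chaincode name or version.
func buildCollectionConfigKey(chaincodeName string) string {
	return chaincodeName + collectionSeparator + "collections"
}
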
2 changes: 1 addition & 1 deletion core/common/validation/statebased/vpmanager.go
@@ -39,7 +39,7 @@ type KeyLevelValidationParameterManager interface {
// height h. The function returns the validation parameter and no error in case of
// success, or nil and an error otherwise. One particular error that may be
// returned is ValidationParameterUpdatedErr, which is returned in case the
- // validation parmeters for the given KVS key have been changed by a transaction
+ // validation parameters for the given KVS key have been changed by a transaction
// with txNum smaller than the one supplied by the caller. This protects from a
// scenario where a transaction changing validation parameters is marked as valid
// by VSCC and is later invalidated by the committer for other reasons (e.g. MVCC
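
A hedged caller-side sketch of the behaviour described above. The method name and signature are assumptions (the interface method itself is not visible in this hunk), and the error type follows the ValidationParameterUpdatedErr name used in the comment:

// fetchKeyEP asks the manager for the validation parameters of a key and reacts
// to the case where an earlier transaction in the same block changed them.
func fetchKeyEP(mgr KeyLevelValidationParameterManager, cc, coll, key string, blockNum, txNum uint64) ([]byte, error) {
	vp, err := mgr.GetValidationParameterForKey(cc, coll, key, blockNum, txNum) // assumed signature
	if _, updated := err.(*ValidationParameterUpdatedErr); updated {          // assumed concrete error type
		// parameters were changed by a transaction with a smaller txNum in this
		// block; the caller must not validate against the stale value
		return nil, err
	}
	return vp, err
}
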
2 changes: 1 addition & 1 deletion core/common/validation/statebased/vpmanagerimpl.go
@@ -209,7 +209,7 @@ func (c *validationContext) waitForValidationResults(kid *ledgerKeyID, blockNum
// that affect us and put them in a local slice; we then release
// the mutex
// 2) we traverse the slice of dependencies and for each, retrieve
- // the validartion result
+ // the validation result
// The two step approach is required to avoid a deadlock where the
// consumer (the caller of this function) holds the mutex and thus
// prevents the producer (the caller of signalValidationResult) to
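
The two-step approach described in waitForValidationResults above can be illustrated with a generic sketch; the dependency representation (a channel per dependency) is a placeholder, not the actual implementation (uses the standard sync package):

// waitForDeps copies the dependencies under the mutex and consumes them outside
// it, so the producer (the equivalent of signalValidationResult) is never blocked
// trying to acquire the same mutex — the deadlock the comment above describes.
func waitForDeps(mu *sync.Mutex, deps []chan bool) []bool {
	// step 1: snapshot the dependencies while holding the mutex, then release it
	mu.Lock()
	local := make([]chan bool, len(deps))
	copy(local, deps)
	mu.Unlock()

	// step 2: retrieve each validation result without holding the mutex
	results := make([]bool, 0, len(local))
	for _, ch := range local {
		results = append(results, <-ch)
	}
	return results
}
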
2 changes: 1 addition & 1 deletion core/endorser/plugin_endorser.go
@@ -29,7 +29,7 @@ type TransientStoreRetriever interface {

// ChannelStateRetriever retrieves Channel state
type ChannelStateRetriever interface {
- // ChannelState returns a QueryCreator for the given Channel
+ // NewQueryCreator returns a QueryCreator for the given Channel
NewQueryCreator(channel string) (QueryCreator, error)
}

4 changes: 2 additions & 2 deletions core/handlers/library/registry.go
@@ -126,7 +126,7 @@ func (r *registry) loadCompiled(handlerFactory string, handlerType HandlerType,
}
}

- // loadPlugin loads a pluggagle handler
+ // loadPlugin loads a pluggable handler
func (r *registry) loadPlugin(pluginPath string, handlerType HandlerType, extraArgs ...string) {
if _, err := os.Stat(pluginPath); err != nil {
logger.Panicf(fmt.Sprintf("Could not find plugin at path %s: %s", pluginPath, err))
@@ -234,7 +234,7 @@ func panicWithDefinitionError(factory string) {
}

// Lookup returns a list of handlers with the given
- // given type, or nil if none exist
+ // type, or nil if none exist
func (r *registry) Lookup(handlerType HandlerType) interface{} {
if handlerType == Auth {
return r.filters
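
For context on loadPlugin above, a minimal sketch of loading a pluggable handler with the standard library plugin package; the exported symbol name "New" and the factory signature are assumptions, not the actual handler contract (uses the plugin and fmt packages):

// loadHandlerFactory opens a shared object built with `go build -buildmode=plugin`
// and looks up an assumed factory symbol; the registry's real lookup rules differ.
func loadHandlerFactory(pluginPath string) (func() interface{}, error) {
	p, err := plugin.Open(pluginPath)
	if err != nil {
		return nil, err
	}
	sym, err := p.Lookup("New") // assumed exported factory name
	if err != nil {
		return nil, err
	}
	factory, ok := sym.(func() interface{})
	if !ok {
		return nil, fmt.Errorf("plugin at %s does not export the expected factory", pluginPath)
	}
	return factory, nil
}
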
2 changes: 1 addition & 1 deletion core/ledger/cceventmgmt/mgr.go
@@ -93,7 +93,7 @@ func (m *Mgr) HandleChaincodeDeploy(chainid string, chaincodeDefinitions []*Chai

// ChaincodeDeployDone is expected to be called when the deploy transaction state is committed
func (m *Mgr) ChaincodeDeployDone(chainid string) {
- // release the lock aquired in function `HandleChaincodeDeploy`
+ // release the lock acquired in function `HandleChaincodeDeploy`
defer m.rwlock.RUnlock()
if m.callbackStatus.isDeployPending(chainid) {
m.invokeDoneOnHandlers(chainid, true)
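
The lock hand-off between HandleChaincodeDeploy and ChaincodeDeployDone follows the shape below; a stripped-down sketch of the pattern, not the full manager (uses the standard sync package):

type deployMgr struct {
	rwlock sync.RWMutex
}

// handleDeploy takes the read lock and deliberately does not release it; the
// deploy is still pending when this returns.
func (m *deployMgr) handleDeploy() {
	m.rwlock.RLock()
	// ... notify listeners about the pending deploy ...
}

// deployDone is called once the deploy transaction commits and releases the read
// lock acquired in handleDeploy.
func (m *deployMgr) deployDone() {
	defer m.rwlock.RUnlock()
	// ... invoke the "done" callbacks ...
}
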
2 changes: 1 addition & 1 deletion core/ledger/confighistory/mgr.go
@@ -23,7 +23,7 @@ const (
collectionConfigNamespace = "lscc" // lscc namespace was introduced in version 1.2 and we continue to use this in order to be compatible with existing data
)

- // Mgr should be registered as a state listener. The state listener builds the history and retriver helps in querying the history
+ // Mgr should be registered as a state listener. The state listener builds the history and retriever helps in querying the history
type Mgr interface {
ledger.StateListener
GetRetriever(ledgerID string, ledgerInfoRetriever LedgerInfoRetriever) ledger.ConfigHistoryRetriever
2 changes: 1 addition & 1 deletion core/ledger/kvledger/bookkeeping/provider.go
@@ -16,7 +16,7 @@ import (
type Category int

const (
- // PvtdataExpiry repersents the bookkeeping related to expiry of pvtdata because of BTL policy
+ // PvtdataExpiry represents the bookkeeping related to expiry of pvtdata because of BTL policy
PvtdataExpiry Category = iota
// MetadataPresenceIndicator maintains the bookkeeping about whether metadata is ever set for a namespace
MetadataPresenceIndicator
@@ -248,7 +248,7 @@ func (s *CommonStorageDB) ApplyPrivacyAwareUpdates(updates *UpdateBatch, height
// an optimization such that it keeps track if a namespaces has never stored metadata for any of
// its items, the value 'nil' is returned without going to the db. This is intended to be invoked
// in the validation and commit path. This saves the chaincodes from paying unnecessary performance
- // penality if they do not use features that leverage metadata (such as key-level endorsement),
+ // penalty if they do not use features that leverage metadata (such as key-level endorsement),
func (s *CommonStorageDB) GetStateMetadata(namespace, key string) ([]byte, error) {
if !s.metadataHint.metadataEverUsedFor(namespace) {
return nil, nil
@@ -261,7 +261,7 @@ func (s *CommonStorageDB) GetStateMetadata(namespace, key string) ([]byte, error
}

// GetPrivateDataMetadataByHash implements corresponding function in interface DB. For additional details, see
- // decription of the similar function 'GetStateMetadata'
+ // description of the similar function 'GetStateMetadata'
func (s *CommonStorageDB) GetPrivateDataMetadataByHash(namespace, collection string, keyHash []byte) ([]byte, error) {
if !s.metadataHint.metadataEverUsedFor(namespace) {
return nil, nil
@@ -274,7 +274,7 @@ func (s *CommonStorageDB) GetPrivateDataMetadataByHash(namespace, collection str
}

// HandleChaincodeDeploy initializes database artifacts for the database associated with the namespace
- // This function delibrately suppresses the errors that occur during the creation of the indexes on couchdb.
+ // This function deliberately suppresses the errors that occur during the creation of the indexes on couchdb.
// This is because, in the present code, we do not differentiate between the errors because of couchdb interaction
// and the errors because of bad index files - the later being unfixable by the admin. Note that the error suppression
// is acceptable since peer can continue in the committing role without the indexes. However, executing chaincode queries
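
The GetStateMetadata fast path above depends on a per-namespace hint; a simplified sketch of that idea — the hint bookkeeping here is illustrative, not the actual metadataHint implementation (uses the standard sync package):

// metadataUsageHint records which namespaces have ever stored metadata; a lookup
// for a namespace absent from the set can return nil without touching the DB,
// which is the penalty-avoidance described for GetStateMetadata above.
type metadataUsageHint struct {
	mu   sync.RWMutex
	used map[string]struct{}
}

func (h *metadataUsageHint) metadataEverUsedFor(ns string) bool {
	h.mu.RLock()
	defer h.mu.RUnlock()
	_, ok := h.used[ns]
	return ok
}

func (h *metadataUsageHint) markUsed(ns string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.used[ns] = struct{}{}
}
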
@@ -19,13 +19,13 @@ type UpdatesBytesBuilder struct {

// DeterministicBytesForPubAndHashUpdates constructs the bytes for a given UpdateBatch
// while constructing the bytes, it considers only public writes and hashed writes for
- // the collections. For achieveing the determinism, it constructs a slice of proto messages
+ // the collections. For achieving the determinism, it constructs a slice of proto messages
// of type 'KVWriteProto'. In the slice, all the writes for a namespace "ns1" appear before
// the writes for another namespace "ns2" if "ns1" < "ns2" (lexicographically). Within a
// namespace, all the public writes appear before the collection writes. Like namespaces,
// the collections writes within a namespace appear in the order of lexicographical order.
- // If an entry has the same namespace as its preceding entry, the namespcae field is skipped.
- // A Similar treatment is given to the repeative entries for a collection within a namespace.
+ // If an entry has the same namespace as its preceding entry, the namespace field is skipped.
+ // A Similar treatment is given to the repetitive entries for a collection within a namespace.
// For illustration, see the corresponding unit tests
func (bb *UpdatesBytesBuilder) DeterministicBytesForPubAndHashUpdates(u *UpdateBatch) ([]byte, error) {
pubUpdates := u.PubUpdates
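
The deterministic ordering that DeterministicBytesForPubAndHashUpdates relies on boils down to sorting keys before serialization; a generic sketch with a placeholder value type (uses the standard sort package):

// deterministicNamespaces returns the namespaces of a batch in a fixed
// lexicographic order, so every peer serializes the same batch to identical
// bytes; the same idea applies to the collections within a namespace.
func deterministicNamespaces(writes map[string][]byte) []string {
	namespaces := make([]string, 0, len(writes))
	for ns := range writes {
		namespaces = append(namespaces, ns)
	}
	sort.Strings(namespaces) // "ns1" comes before "ns2" when "ns1" < "ns2"
	return namespaces
}
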
8 changes: 4 additions & 4 deletions core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
@@ -22,7 +22,7 @@ import (
type PurgeMgr interface {
// PrepareForExpiringKeys gives a chance to the PurgeMgr to do background work in advance if any
PrepareForExpiringKeys(expiringAtBlk uint64)
- // WaitForPrepareToFinish holds the caller till the background goroutine lauched by 'PrepareForExpiringKeys' is finished
+ // WaitForPrepareToFinish holds the caller till the background goroutine launched by 'PrepareForExpiringKeys' is finished
WaitForPrepareToFinish()
// DeleteExpiredAndUpdateBookkeeping updates the bookkeeping and modifies the update batch by adding the deletes for the expired pvtdata
DeleteExpiredAndUpdateBookkeeping(
@@ -197,8 +197,8 @@ func (p *purgeMgr) DeleteExpiredAndUpdateBookkeeping(

// BlockCommitDone implements function in the interface 'PurgeMgr'
// These orphan entries for purge-schedule can be cleared off in bulk in a separate background routine as well
- // If we maintian the following logic (i.e., clear off entries just after block commit), we need a TODO -
- // We need to perform a check in the start, becasue there could be a crash between the block commit and
+ // If we maintain the following logic (i.e., clear off entries just after block commit), we need a TODO -
+ // We need to perform a check in the start, because there could be a crash between the block commit and
// invocation to this function resulting in the orphan entry for the deletes scheduled for the last block
// Also, the another way is to club the delete of these entries in the same batch that adds entries for the future expirations -
// however, that requires updating the expiry store by replaying the last block from blockchain in order to sustain a crash between
@@ -229,7 +229,7 @@ func (p *purgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
logger.Debugf("No expiry entry found for expiringAtBlk [%d]", expiringAtBlk)
return workingset
}
logger.Debugf("Total [%d] expiring entries found. Evaluaitng whether some of these keys have been overwritten in later blocks...", len(toPurge))
logger.Debugf("Total [%d] expiring entries found. Evaluating whether some of these keys have been overwritten in later blocks...", len(toPurge))

for purgeEntryK, purgeEntryV := range toPurge {
logger.Debugf("Evaluating for hashedKey [%s]", purgeEntryK)
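
PrepareForExpiringKeys and WaitForPrepareToFinish describe a launch-then-join pattern; a minimal generic sketch with a WaitGroup, not the actual purge manager (uses the standard sync package):

type prepWorker struct {
	wg sync.WaitGroup
}

// prepareForExpiringKeys launches the expensive lookup of expiring keys in the
// background so that block processing is not delayed.
func (p *prepWorker) prepareForExpiringKeys(expiringAtBlk uint64, load func(uint64)) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		load(expiringAtBlk)
	}()
}

// waitForPrepareToFinish holds the caller until the goroutine launched by
// prepareForExpiringKeys has finished.
func (p *prepWorker) waitForPrepareToFinish() {
	p.wg.Wait()
}
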
2 changes: 1 addition & 1 deletion core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
@@ -56,7 +56,7 @@ func (combiner *itrCombiner) Next() (commonledger.QueryResult, error) {
if err != nil {
return nil, err
}
- if removed { // if the current iterator is exhaused and hence removed, decrement the index
+ if removed { // if the current iterator is exhausted and hence removed, decrement the index
// because indexes of the remaining iterators are decremented by one
i--
}
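
The index decrement in Next above is the usual idiom for removing an element from a slice while iterating over it; a standalone sketch with a placeholder iterator type:

type exhaustible interface{ exhausted() bool }

// dropExhausted removes exhausted iterators in place; after removing element i,
// the next element shifts into position i, so the index is decremented to avoid
// skipping it — the same reason itrCombiner.Next decrements i.
func dropExhausted(itrs []exhaustible) []exhaustible {
	for i := 0; i < len(itrs); i++ {
		if itrs[i].exhausted() {
			itrs = append(itrs[:i], itrs[i+1:]...)
			i--
		}
	}
	return itrs
}
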
