eth: fix typos #16414

Merged 1 commit on Apr 4, 2018
4 changes: 2 additions & 2 deletions eth/backend.go
@@ -63,7 +63,7 @@ type Ethereum struct {
chainConfig *params.ChainConfig

// Channel for shutting down the service
- shutdownChan chan bool // Channel for shutting down the ethereum
+ shutdownChan chan bool // Channel for shutting down the Ethereum
stopDbUpgrade func() error // stop chain db sequential key upgrade

// Handlers
@@ -351,7 +351,7 @@ func (s *Ethereum) StartMining(local bool) error {
if local {
// If local (CPU) mining is started, we can disable the transaction rejection
// mechanism introduced to speed sync times. CPU mining on mainnet is ludicrous
- // so noone will ever hit this path, whereas marking sync done on CPU mining
+ // so none will ever hit this path, whereas marking sync done on CPU mining
// will ensure that private networks work in single miner mode too.
atomic.StoreUint32(&s.protocolManager.acceptTxs, 1)
}
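For readers skimming the hunk above: the flag being stored is a plain atomic gate. Below is a minimal, hypothetical sketch of the idiom (illustrative names, not geth's actual API), showing why marking sync done lets a single-miner private network accept transactions.

package main

import (
	"fmt"
	"sync/atomic"
)

type manager struct {
	acceptTxs uint32 // 0 = still syncing, 1 = accept incoming transactions
}

// markSynced mirrors the atomic.StoreUint32 call in the hunk above.
func (m *manager) markSynced() {
	atomic.StoreUint32(&m.acceptTxs, 1)
}

// handleTx drops transactions until the node considers itself synced.
func (m *manager) handleTx(tx string) {
	if atomic.LoadUint32(&m.acceptTxs) == 0 {
		fmt.Println("dropped while syncing:", tx)
		return
	}
	fmt.Println("queued:", tx)
}

func main() {
	m := &manager{}
	m.handleTx("0xaa") // dropped: sync not done
	m.markSynced()     // e.g. local CPU mining started
	m.handleTx("0xbb") // accepted
}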
4 changes: 2 additions & 2 deletions eth/db_upgrade.go
@@ -62,7 +62,7 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
failed error
)
for failed == nil && it.Next() {
- // Skip any entries that don't look like old transaction meta entires (<hash>0x01)
+ // Skip any entries that don't look like old transaction meta entries (<hash>0x01)
key := it.Key()
if len(key) != common.HashLength+1 || key[common.HashLength] != 0x01 {
continue
@@ -86,7 +86,7 @@ }
}
}
// Convert the old metadata to a new lookup entry, delete duplicate data
- if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new looku entry
+ if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new lookup entry
if failed = db.Delete(hash); failed == nil { // Delete the duplicate transaction data
if failed = db.Delete(append([]byte("receipts-"), hash...)); failed == nil { // Delete the duplicate receipt data
if failed = db.Delete(key); failed != nil { // Delete the old transaction metadata
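The nested Put/Delete chain above is a key-schema migration. A hedged sketch of one entry's conversion, assuming a plain in-memory map in place of ethdb (key layout as in the hunk: old metadata at <hash>0x01, new lookup entry at "l"+<hash>):

package main

import "fmt"

// upgradeEntry converts one old-format record: metadata moves from
// <hash>0x01 to "l"+<hash>, and the duplicated body and receipt
// entries keyed by <hash> and "receipts-"+<hash> are dropped.
func upgradeEntry(db map[string][]byte, hash []byte) {
	oldKey := string(append(append([]byte{}, hash...), 0x01))
	meta, ok := db[oldKey]
	if !ok {
		return // not an old transaction meta entry
	}
	db["l"+string(hash)] = meta          // write the new lookup entry
	delete(db, string(hash))             // delete duplicate transaction data
	delete(db, "receipts-"+string(hash)) // delete duplicate receipt data
	delete(db, oldKey)                   // delete the old metadata
}

func main() {
	hash := []byte{0xde, 0xad}
	db := map[string][]byte{
		string(append(append([]byte{}, hash...), 0x01)): []byte("meta"),
		string(hash):               []byte("tx body"),
		"receipts-" + string(hash): []byte("receipts"),
	}
	upgradeEntry(db, hash)
	fmt.Println(len(db), "entry remains") // 1: the new lookup entry
}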
8 changes: 4 additions & 4 deletions eth/downloader/downloader.go
@@ -47,7 +47,7 @@ var (

MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation
rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum rount-trip time to target for download requests
+ rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
@@ -884,7 +884,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
- // The method returs the entire filled skeleton and also the number of headers
+ // The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
log.Debug("Filling up skeleton", "from", from)
@@ -1377,7 +1377,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
pivot = height - uint64(fsMinFullBlocks)
}
// To cater for moving pivot points, track the pivot block and subsequently
- // accumulated download results separatey.
+ // accumulated download results separately.
var (
oldPivot *fetchResult // Locked in pivot block, might change eventually
oldTail []*fetchResult // Downloaded content after the pivot
@@ -1615,7 +1615,7 @@ func (d *Downloader) qosReduceConfidence() {
//
// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
- // be adapted to, but smaller ones are preffered (stabler download stream).
+ // be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}
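The constants and requestRTT above tune download timeouts. A worked sketch of how they plausibly combine, assuming the simple scale-and-cap rule the comments describe (geth's confidence weighting is left out):

package main

import (
	"fmt"
	"time"
)

const (
	ttlScaling = 3           // RTT -> TTL scaling factor, from the hunk above
	ttlLimit   = time.Minute // hard TTL cap, from the hunk above
)

// requestRTT targets .9 of the estimate, as in the method above: smaller
// targets are preferred because they yield a stabler download stream.
func requestRTT(estimate time.Duration) time.Duration {
	return estimate * 9 / 10
}

// requestTTL scales the target RTT into a request timeout and caps it.
func requestTTL(estimate time.Duration) time.Duration {
	ttl := requestRTT(estimate) * ttlScaling
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}

func main() {
	for _, rtt := range []time.Duration{2 * time.Second, 20 * time.Second} {
		fmt.Println(rtt, "->", requestTTL(rtt)) // 2s -> 5.4s, 20s -> 54s
	}
}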
2 changes: 1 addition & 1 deletion eth/downloader/downloader_test.go
@@ -159,7 +159,7 @@ func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentRec
// Create the common suffix
hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

- // Create the forks, making the second heavyer if non balanced forks were requested
+ // Create the forks, making the second heavier if non balanced forks were requested
hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
hashes1 = append(hashes1, hashes[1:]...)

6 changes: 3 additions & 3 deletions eth/downloader/fakepeer.go
@@ -27,7 +27,7 @@ import (

// FakePeer is a mock downloader peer that operates on a local database instance
// instead of being an actual live node. It's useful for testing and to implement
- // sync commands from an xisting local database.
+ // sync commands from an existing local database.
type FakePeer struct {
id string
db ethdb.Database
@@ -48,7 +48,7 @@ func (p *FakePeer) Head() (common.Hash, *big.Int) {
}

// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
- // defined by the origin hash and the associaed query parameters.
+ // defined by the origin hash and the associated query parameters.
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
@@ -92,7 +92,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
}

// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
- // defined by the origin number and the associaed query parameters.
+ // defined by the origin number and the associated query parameters.
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
2 changes: 1 addition & 1 deletion eth/downloader/peer.go
@@ -551,7 +551,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
- // Gather all the currnetly measured round trip times
+ // Gather all the currently measured round trip times
ps.lock.RLock()
defer ps.lock.RUnlock()

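A rough sketch of the median computation this hunk's comment introduces, with the peer set reduced to a plain slice of measured RTTs and the read lock elided:

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianRTT sorts a copy of the measurements and returns the middle one.
func medianRTT(rtts []time.Duration) time.Duration {
	if len(rtts) == 0 {
		return 0
	}
	sorted := append([]time.Duration(nil), rtts...) // leave the input intact
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	rtts := []time.Duration{120 * time.Millisecond, 80 * time.Millisecond, 200 * time.Millisecond}
	fmt.Println(medianRTT(rtts)) // 120ms
}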
2 changes: 1 addition & 1 deletion eth/downloader/queue.go
@@ -275,7 +275,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
if q.headerResults != nil {
panic("skeleton assembly already in progress")
}
- // Shedule all the header retrieval tasks for the skeleton assembly
+ // Schedule all the header retrieval tasks for the skeleton assembly
q.headerTaskPool = make(map[uint64]*types.Header)
q.headerTaskQueue = prque.New()
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
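For orientation, an illustrative sketch of what ScheduleSkeleton sets up: each skeleton header anchors one fixed-size batch, so retrieval tasks can be keyed by each batch's starting block number. The batch size and the plain map below are assumptions standing in for geth's MaxHeaderFetch and prque:

package main

import "fmt"

const batchSize = 192 // stand-in for MaxHeaderFetch; an assumption

type header struct{ number uint64 }

// scheduleSkeleton keys one retrieval task per skeleton header: the task at
// key k is "fetch headers k through k+batchSize-1", anchored by the skeleton
// header that ends the batch.
func scheduleSkeleton(from uint64, skeleton []*header) map[uint64]*header {
	taskPool := make(map[uint64]*header)
	for i, h := range skeleton {
		taskPool[from+uint64(i)*batchSize] = h
	}
	return taskPool
}

func main() {
	skeleton := []*header{{number: 191}, {number: 383}}
	for start, anchor := range scheduleSkeleton(0, skeleton) {
		fmt.Printf("fetch headers %d..%d (anchor #%d)\n", start, start+batchSize-1, anchor.number)
	}
}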
10 changes: 5 additions & 5 deletions eth/downloader/statesync.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
)

- // stateReq represents a batch of state fetch requests groupped together into
+ // stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
items []common.Hash // Hashes of the state items to download
@@ -139,7 +139,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {

// Handle incoming state packs:
case pack := <-d.stateCh:
- // Discard any data not requested (or previsouly timed out)
+ // Discard any data not requested (or previously timed out)
req := active[pack.PeerId()]
if req == nil {
log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
@@ -182,7 +182,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
case req := <-d.trackStateReq:
// If an active request already exists for this peer, we have a problem. In
// theory the trie node schedule must never assign two requests to the same
- // peer. In practive however, a peer might receive a request, disconnect and
+ // peer. In practice however, a peer might receive a request, disconnect and
// immediately reconnect before the previous times out. In this case the first
// request is never honored, alas we must not silently overwrite it, as that
// causes valid requests to go missing and sync to get stuck.
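The comment above describes a subtle invariant: an in-flight request must not be silently overwritten when its peer reconnects. A minimal sketch of that guard, with purely illustrative types:

package main

import "fmt"

// stateReq stands in for the downloader's per-peer request record.
type stateReq struct {
	peer  string
	items []string
}

// track installs a new request without losing sight of an old one: if the
// peer reconnected before its previous request timed out, the old items are
// surfaced for rescheduling rather than silently dropped.
func track(active map[string]*stateReq, req *stateReq) {
	if old := active[req.peer]; old != nil {
		fmt.Println("peer", req.peer, "already busy, rescheduling", old.items)
	}
	active[req.peer] = req
}

func main() {
	active := make(map[string]*stateReq)
	track(active, &stateReq{peer: "p1", items: []string{"a", "b"}})
	track(active, &stateReq{peer: "p1", items: []string{"c"}}) // triggers the guard
}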
@@ -228,7 +228,7 @@ type stateSync struct {
err error // Any error hit during sync (set before completion)
}

- // stateTask represents a single trie node download taks, containing a set of
+ // stateTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type stateTask struct {
attempts map[string]struct{}
@@ -333,7 +333,7 @@ func (s *stateSync) commit(force bool) error {
return nil
}

- // assignTasks attempts to assing new tasks to all idle peers, either from the
+ // assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
// Iterate over all idle peers and try to assign them state fetches
2 changes: 1 addition & 1 deletion eth/fetcher/fetcher.go
@@ -127,7 +127,7 @@ type Fetcher struct {
// Block cache
queue *prque.Prque // Queue containing the import operations (block number sorted)
queues map[string]int // Per peer block counts to prevent memory exhaustion
- queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)
+ queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)

// Callbacks
getBlock blockRetrievalFn // Retrieves a block from the local chain
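The queued map above is a dedupe set guarding the import queue. A self-contained sketch of the enqueue-once pattern, with simplified types and an illustrative per-peer cap:

package main

import "fmt"

// fetcher mirrors the three cache fields from the hunk above, simplified to
// string hashes instead of common.Hash and *inject.
type fetcher struct {
	queue  []string            // pending imports, arrival order
	queues map[string]int      // per-peer queued counts
	queued map[string]struct{} // set of already queued block hashes
}

// enqueue admits a block once, and bounds how much any one peer may queue.
func (f *fetcher) enqueue(peer, hash string) {
	if _, ok := f.queued[hash]; ok {
		return // duplicate announcement, already queued
	}
	if f.queues[peer] >= 256 { // illustrative cap against memory exhaustion
		return
	}
	f.queued[hash] = struct{}{}
	f.queues[peer]++
	f.queue = append(f.queue, hash)
}

func main() {
	f := &fetcher{queues: map[string]int{}, queued: map[string]struct{}{}}
	f.enqueue("p1", "0x01")
	f.enqueue("p2", "0x01") // deduped
	fmt.Println(f.queue)    // [0x01]
}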
2 changes: 1 addition & 1 deletion eth/filters/api.go
@@ -98,7 +98,7 @@ func (api *PublicFilterAPI) timeoutLoop() {
// NewPendingTransactionFilter creates a filter that fetches pending transaction hashes
// as transactions enter the pending state.
//
- // It is part of the filter package because this filter can be used throug the
+ // It is part of the filter package because this filter can be used through the
// `eth_getFilterChanges` polling method that is also used for log filters.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
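A hedged usage sketch of the polling flow this comment describes: install the pending-transaction filter, then poll eth_getFilterChanges for new hashes. It assumes a node serving JSON-RPC over HTTP at the default localhost:8545 and skips retries; error handling is minimal.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// call performs a single JSON-RPC request against a local node.
func call(method string, params []any) (json.RawMessage, error) {
	body, _ := json.Marshal(map[string]any{
		"jsonrpc": "2.0", "id": 1, "method": method, "params": params,
	})
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var out struct {
		Result json.RawMessage `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return out.Result, nil
}

func main() {
	rawID, err := call("eth_newPendingTransactionFilter", []any{})
	if err != nil {
		panic(err)
	}
	var filterID string
	json.Unmarshal(rawID, &filterID)

	// Poll for hashes that arrived since the last call; repeat on a timer.
	hashes, err := call("eth_getFilterChanges", []any{filterID})
	if err != nil {
		panic(err)
	}
	fmt.Println("pending tx hashes:", string(hashes))
}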
16 changes: 8 additions & 8 deletions eth/handler.go
@@ -96,8 +96,8 @@ type ProtocolManager struct {
wg sync.WaitGroup
}

- // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
- // with the ethereum network.
+ // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
+ // with the Ethereum network.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
@@ -498,20 +498,20 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
- trasactions := make([][]*types.Transaction, len(request))
+ transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))

for i, body := range request {
- trasactions[i] = body.Transactions
+ transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
- filter := len(trasactions) > 0 || len(uncles) > 0
+ filter := len(transactions) > 0 || len(uncles) > 0
if filter {
- trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
+ transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
}
- if len(trasactions) > 0 || len(uncles) > 0 || !filter {
- err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
+ if len(transactions) > 0 || len(uncles) > 0 || !filter {
+ err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
log.Debug("Failed to deliver bodies", "err", err)
}