
Spelling error in code comment #569

Merged · 4 commits · Oct 28, 2020
2 changes: 1 addition & 1 deletion pkg/backup/push.go

@@ -17,7 +17,7 @@ import (
 "github.com/pingcap/br/pkg/rtree"
 )

-// pushDown warps a backup task.
+// pushDown wraps a backup task.
 type pushDown struct {
 mgr ClientMgr
 respCh chan *backup.BackupResponse
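
For readers skimming the diff: the wrapper this comment names couples a client manager with a response channel. A minimal sketch of that shape, where BackupResponse, ClientMgr, and demoMgr are simplified stand-ins for illustration, not br's real types:

package main

import "fmt"

// BackupResponse stands in for the kvproto backup.BackupResponse type.
type BackupResponse struct{ StoreID uint64 }

// ClientMgr stands in for br's connection-manager interface.
type ClientMgr interface{ Name() string }

// pushDown mirrors the struct in the hunk: it wraps a backup task,
// holding the manager that owns store connections and a channel that
// fans in responses from the stores the task is pushed down to.
type pushDown struct {
	mgr    ClientMgr
	respCh chan *BackupResponse
}

type demoMgr struct{}

func (demoMgr) Name() string { return "demo" }

func main() {
	p := pushDown{mgr: demoMgr{}, respCh: make(chan *BackupResponse, 1)}
	p.respCh <- &BackupResponse{StoreID: 1}
	fmt.Println(p.mgr.Name(), (<-p.respCh).StoreID)
}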

2 changes: 1 addition & 1 deletion pkg/restore/batcher.go

@@ -274,7 +274,7 @@ func (b *Batcher) drainRanges() DrainResult {

 // the batch is full, we should stop here!
 // we use strictly greater than because when we send a batch at equal, the offset should plus one.
-// (because the last table is sent, we should put it in emptyTables), and this will intrduce extra complex.
+// (because the last table is sent, we should put it in emptyTables), and this will introduce extra complex.
 if thisTableLen+collected > b.batchSizeThreshold {
 drainSize := b.batchSizeThreshold - collected
 thisTableRanges := thisTable.Range
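
The comment being fixed here carries real reasoning: the batcher uses strictly-greater-than so that a batch landing exactly on the threshold is sent whole, avoiding the extra offset-plus-one and emptyTables bookkeeping. A minimal sketch of that decision, with simplified names standing in for the Batcher's fields:

package main

import "fmt"

// drainDecision sketches the branch the comment describes: a strictly
// greater-than test, so a batch that lands exactly on the threshold is
// sent whole and the last table needs no special handling.
func drainDecision(thisTableLen, collected, batchSizeThreshold int) {
	if thisTableLen+collected > batchSizeThreshold {
		// The batch would overflow: take only part of this table.
		drainSize := batchSizeThreshold - collected
		fmt.Printf("split table: drain %d of %d ranges\n", drainSize, thisTableLen)
		return
	}
	// Equal to or below the threshold: the whole table fits in this batch.
	fmt.Printf("take whole table: %d ranges (total %d)\n", thisTableLen, thisTableLen+collected)
}

func main() {
	drainDecision(4, 6, 10) // exactly at the threshold: whole table is taken
	drainDecision(5, 6, 10) // over the threshold: table is split
}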

2 changes: 1 addition & 1 deletion pkg/restore/pipeline_items.go

@@ -72,7 +72,7 @@ func NewBRContextManager(client *Client) ContextManager {
 type brContextManager struct {
 client *Client

-// This 'set' of table ID allow us handle each table just once.
+// This 'set' of table ID allow us to handle each table just once.
 hasTable map[int64]CreatedTable
}
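
For context, the 'set' this comment refers to is the usual Go idiom of a map keyed by ID used for deduplication. A minimal sketch, assuming a stand-in CreatedTable type rather than br's actual one:

package main

import "fmt"

// CreatedTable is a stand-in for br's restore.CreatedTable.
type CreatedTable struct{ Name string }

// manager mirrors the shape of brContextManager: a map keyed by table
// ID acts as a set, so each table is handled exactly once.
type manager struct {
	hasTable map[int64]CreatedTable
}

func (m *manager) enter(id int64, t CreatedTable) {
	if _, ok := m.hasTable[id]; ok {
		return // already handled; do nothing the second time
	}
	m.hasTable[id] = t
	fmt.Println("handling table", t.Name)
}

func main() {
	m := &manager{hasTable: make(map[int64]CreatedTable)}
	m.enter(1, CreatedTable{Name: "t1"})
	m.enter(1, CreatedTable{Name: "t1"}) // deduplicated: prints nothing
}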

2 changes: 1 addition & 1 deletion pkg/restore/split.go

@@ -251,7 +251,7 @@ func (rs *RegionSplitter) splitAndScatterRegions(
 return nil, err
 }
 for _, region := range newRegions {
-// Wait for a while until the regions successfully splits.
+// Wait for a while until the regions successfully split.
 rs.waitForSplit(ctx, region.Region.Id)
 if err = rs.client.ScatterRegion(ctx, region); err != nil {
 log.Warn("scatter region failed", utils.ZapRegion(region.Region), zap.Error(err))
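
The control flow around this comment is a common split-then-scatter pattern: block until each new region's split settles, then scatter it, logging rather than aborting on a scatter failure. A sketch under simplified assumptions (the Region type, splitClient interface, and waitForSplit callback below are stand-ins, not br's API):

package main

import (
	"context"
	"log"
)

// Region stands in for the region metadata type br works with.
type Region struct{ ID uint64 }

// splitClient is a simplified stand-in for the split client br uses.
type splitClient interface {
	ScatterRegion(ctx context.Context, r Region) error
}

// scatterNewRegions mirrors the hunk's loop: wait for each region's
// split to finish, then ask to scatter it; a scatter failure is only
// logged so the remaining regions are still processed.
func scatterNewRegions(ctx context.Context, c splitClient, regions []Region,
	waitForSplit func(ctx context.Context, id uint64)) {
	for _, region := range regions {
		waitForSplit(ctx, region.ID)
		if err := c.ScatterRegion(ctx, region); err != nil {
			log.Printf("scatter region failed: region=%d err=%v", region.ID, err)
		}
	}
}

type noopClient struct{}

func (noopClient) ScatterRegion(ctx context.Context, r Region) error { return nil }

func main() {
	scatterNewRegions(context.Background(), noopClient{},
		[]Region{{ID: 1}, {ID: 2}},
		func(ctx context.Context, id uint64) { /* poll until the split is visible */ })
}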