
*: gofmt code for adapting go 1.19 #36832

Merged: 19 commits, Aug 11, 2022
10 changes: 7 additions & 3 deletions br/pkg/glue/console_glue.go
@@ -129,8 +129,10 @@ func (t *Table) maxKeyLen() int {

// Print prints the table.
// The format would be like:
// Key1: <Value>
// Other: <Value>
//
// Key1: <Value>
// Other: <Value>
//
// LongKey: <Value>
// The format may change if the terminal size is small.
func (t *Table) Print() {
@@ -305,7 +307,9 @@ func (ps PrettyString) slicePointOf(s int) (realSlicePoint, endAt int) {
// It is the abstraction of some subarea of the terminal,
// you might imagine it as a panel in the tmux, but with infinity height.
// For example, printing a frame with the width of 10 chars, and 4 chars offset left, would be like:
// v~~~~~~~~~~v Here is the "width of a frame".
//
// v~~~~~~~~~~v Here is the "width of a frame".
//
// +--+----------+--+
// | Hello, wor |
// | ld. |
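
This hunk shows the core of the Go 1.19 gofmt change: an indented span inside a doc comment must now be set off by blank // lines and indented (gofmt uses a tab) so go/doc renders it as a preformatted block instead of reflowing it. A minimal, self-contained sketch of the canonical form, using a hypothetical Greet function:

package main

import "fmt"

// Greet prints a greeting. The output looks like:
//
//	Hello, <name>
//	Bye, <name>
//
// Before Go 1.19 the indented lines sat directly under the first sentence;
// gofmt 1.19 inserts the blank // separators and indents the block with a
// tab so go/doc renders it preformatted instead of reflowing it.
func Greet(name string) {
	fmt.Printf("Hello, %s\nBye, %s\n", name, name)
}

func main() { Greet("world") }
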
1 change: 1 addition & 0 deletions br/pkg/lightning/backend/local/duplicate.go
@@ -246,6 +246,7 @@ type DupKVStream interface {

// LocalDupKVStream implements the interface of DupKVStream.
// It collects duplicate key-value pairs from a pebble.DB.
//
//goland:noinspection GoNameStartsWithPackageName
type LocalDupKVStream struct {
iter Iter
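
The one-line change here covers a second gofmt 1.19 rule: directive comments such as //nolint:... or //goland:noinspection are machine-readable, so gofmt now separates them from the human-readable doc text with a blank // line. A sketch of the resulting shape, with a hypothetical dupStream type standing in for LocalDupKVStream:

package sketch

// dupStream collects duplicate key-value pairs from an underlying store
// (a hypothetical stand-in for LocalDupKVStream, for illustration only).
//
//nolint:unused
type dupStream struct {
	keys map[string][]byte
}
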
16 changes: 8 additions & 8 deletions br/pkg/lightning/backend/noop/noop.go
@@ -97,14 +97,14 @@ func (b noopBackend) CheckRequirements(context.Context, *backend.CheckCtx) error
// name. The returned table info does not need to be precise if the encoder,
// is not requiring them, but must at least fill in the following fields for
// TablesFromMeta to succeed:
// - Name
// - State (must be model.StatePublic)
// - ID
// - Columns
// * Name
// * State (must be model.StatePublic)
// * Offset (must be 0, 1, 2, ...)
// - PKIsHandle (true = do not generate _tidb_rowid)
// - Name
// - State (must be model.StatePublic)
// - ID
// - Columns
// - Name
// - State (must be model.StatePublic)
// - Offset (must be 0, 1, 2, ...)
// - PKIsHandle (true = do not generate _tidb_rowid)
func (b noopBackend) FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error) {
return nil, nil
}
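
This hunk shows gofmt 1.19's list handling: doc comments now have first-class list syntax, bullet markers are normalized to dashes, and items plus their continuation lines are re-indented so go/doc renders an actual list — which is why the mixed * and - markers above collapse to uniform dashes. A sketch of the canonical form (the indentation below is what gofmt emits to the best of my knowledge; tableStub is a hypothetical stand-in for model.TableInfo):

package sketch

// tableStub illustrates the gofmt 1.19 list form: every bullet becomes a
// dash and continuation lines are indented to align under the item text.
//
// Fields that must be filled in:
//   - Name
//   - State (must be public; a long item's continuation lines are
//     indented two extra spaces, like this one)
//   - Columns
//   - PKIsHandle
type tableStub struct {
	Name       string
	State      int
	Columns    []string
	PKIsHandle bool
}
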
13 changes: 8 additions & 5 deletions br/pkg/lightning/backend/tidb/tidb.go
@@ -627,7 +627,8 @@ type stmtTask struct {
}

// WriteBatchRowsToDB write rows in batch mode, which will insert multiple rows like this:
// insert into t1 values (111), (222), (333), (444);
//
// insert into t1 values (111), (222), (333), (444);
func (be *tidbBackend) WriteBatchRowsToDB(ctx context.Context, tableName string, columnNames []string, r kv.Rows) error {
rows := r.(tidbRows)
insertStmt := be.checkAndBuildStmt(rows, tableName, columnNames)
@@ -655,10 +656,12 @@ func (be *tidbBackend) checkAndBuildStmt(rows tidbRows, tableName string, column
}

// WriteRowsToDB write rows in row-by-row mode, which will insert multiple rows like this:
// insert into t1 values (111);
// insert into t1 values (222);
// insert into t1 values (333);
// insert into t1 values (444);
//
// insert into t1 values (111);
// insert into t1 values (222);
// insert into t1 values (333);
// insert into t1 values (444);
//
// See more details in br#1366: https://github.com/pingcap/br/issues/1366
func (be *tidbBackend) WriteRowsToDB(ctx context.Context, tableName string, columnNames []string, r kv.Rows) error {
rows := r.(tidbRows)
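
As context for the two write paths whose comments are being reformatted: batch mode packs all rows into a single INSERT, while row-by-row mode issues one INSERT per row. A hedged sketch of building the batched statement (buildBatchInsert is an illustrative helper, not lightning's actual code):

package sketch

import (
	"fmt"
	"strings"
)

// buildBatchInsert builds one multi-row INSERT of the shape the doc comment
// shows (insert into t1 values (...), (...), ...), using placeholders
// instead of literal values.
func buildBatchInsert(table string, cols []string, rowCount int) string {
	placeholder := "(" + strings.TrimSuffix(strings.Repeat("?,", len(cols)), ",") + ")"
	values := make([]string, rowCount)
	for i := range values {
		values[i] = placeholder
	}
	return fmt.Sprintf("INSERT INTO %s (%s) VALUES %s",
		table, strings.Join(cols, ","), strings.Join(values, ","))
}
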
1 change: 1 addition & 0 deletions br/pkg/lightning/backend/tidb/tidb_test.go
@@ -214,6 +214,7 @@ func TestWriteRowsErrorOnDup(t *testing.T) {
}

// TODO: temporarily disable this test before we fix strict mode
//
//nolint:unused,deadcode
func testStrictMode(t *testing.T) {
s := createMysqlSuite(t)
19 changes: 10 additions & 9 deletions br/pkg/lightning/lightning.go
@@ -257,11 +257,12 @@ func (l *Lightning) goServe(statusAddr string, realAddrWriter io.Writer) error {
}

// RunOnce is used by binary lightning and host when using lightning as a library.
// - for binary lightning, taskCtx could be context.Background which means taskCtx wouldn't be canceled directly by its
// cancel function, but only by Lightning.Stop or HTTP DELETE using l.cancel. and glue could be nil to let lightning
// use a default glue later.
// - for lightning as a library, taskCtx could be a meaningful context that get canceled outside, and glue could be a
// caller implemented glue.
// - for binary lightning, taskCtx could be context.Background which means taskCtx wouldn't be canceled directly by its
// cancel function, but only by Lightning.Stop or HTTP DELETE using l.cancel. and glue could be nil to let lightning
// use a default glue later.
// - for lightning as a library, taskCtx could be a meaningful context that get canceled outside, and glue could be a
// caller implemented glue.
//
// deprecated: use RunOnceWithOptions instead.
func (l *Lightning) RunOnce(taskCtx context.Context, taskCfg *config.Config, glue glue.Glue) error {
if err := taskCfg.Adjust(taskCtx); err != nil {
@@ -309,10 +310,10 @@ func (l *Lightning) RunServer() error {
}

// RunOnceWithOptions is used by binary lightning and host when using lightning as a library.
// - for binary lightning, taskCtx could be context.Background which means taskCtx wouldn't be canceled directly by its
// cancel function, but only by Lightning.Stop or HTTP DELETE using l.cancel. No need to set Options
// - for lightning as a library, taskCtx could be a meaningful context that get canceled outside, and there Options may
// be used:
// - for binary lightning, taskCtx could be context.Background which means taskCtx wouldn't be canceled directly by its
// cancel function, but only by Lightning.Stop or HTTP DELETE using l.cancel. No need to set Options
// - for lightning as a library, taskCtx could be a meaningful context that get canceled outside, and there Options may
// be used:
// - WithGlue: set a caller implemented glue. Otherwise, lightning will use a default glue later.
// - WithDumpFileStorage: caller has opened an external storage for lightning. Otherwise, lightning will open a
// storage by config
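
One nit the reformat does not touch: go/doc and IDEs recognize a deprecation notice only when the paragraph starts with the capitalized form "Deprecated:", so the lowercase "deprecated: use RunOnceWithOptions instead." above will not be surfaced by tooling. The recognized form, sketched on a stub:

package sketch

// RunOnce runs a single import task (body and signature elided).
//
// Deprecated: Use RunOnceWithOptions instead.
func RunOnce() {}
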
2 changes: 1 addition & 1 deletion br/pkg/lightning/mydump/loader.go
@@ -100,7 +100,7 @@ func (m *MDTableMeta) GetSchema(ctx context.Context, store storage.ExternalStora
}

/*
Mydumper File Loader
Mydumper File Loader
*/
type MDLoader struct {
store storage.ExternalStorage
2 changes: 1 addition & 1 deletion br/pkg/lightning/mydump/parser_generated.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion br/pkg/lightning/mydump/region_test.go
@@ -36,7 +36,7 @@ import (
// }

/*
TODO : test with specified 'regionBlockSize' ...
TODO : test with specified 'regionBlockSize' ...
*/
func TestTableRegion(t *testing.T) {
cfg := newConfigWithSourceDir("./examples")
8 changes: 4 additions & 4 deletions br/pkg/lightning/restore/precheck_impl.go
@@ -866,10 +866,10 @@ func (ci *csvHeaderCheckItem) GetCheckItemID() CheckItemID {
}

// Check tries to check whether the csv header config is consistent with the source csv files by:
// 1. pick one table with two CSV files and a unique/primary key
// 2. read the first row of those two CSV files
// 3. checks if the content of those first rows are compatible with the table schema, and whether the
// two rows are identical, to determine if the first rows are a header rows.
// 1. pick one table with two CSV files and a unique/primary key
// 2. read the first row of those two CSV files
// 3. checks if the content of those first rows are compatible with the table schema, and whether the
// two rows are identical, to determine if the first rows are a header rows.
func (ci *csvHeaderCheckItem) Check(ctx context.Context) (*CheckResult, error) {
// if cfg set header = true but source files actually contain not header, former SchemaCheck should
// return error in this situation, so we need do it again.
1 change: 0 additions & 1 deletion br/pkg/restore/db.go
@@ -171,7 +171,6 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
return errors.Trace(err)
}

//
func (db *DB) restoreSequence(ctx context.Context, table *metautil.Table) error {
var restoreMetaSQL string
var err error
10 changes: 6 additions & 4 deletions br/pkg/restore/split_test.go
@@ -320,8 +320,9 @@ func TestScatterFinishInTime(t *testing.T) {
// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj)
// rewrite rules: aa -> xx, cc -> bb
// expected regions after split:
// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj),
// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, )
//
// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj),
// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, )
func TestSplitAndScatter(t *testing.T) {
t.Run("BatchScatter", func(t *testing.T) {
client := initTestClient(false)
@@ -474,8 +475,9 @@ func initRewriteRules() *restore.RewriteRules {
}

// expected regions after split:
// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj),
// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, )
//
// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj),
// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, )
func validateRegions(regions map[uint64]*split.RegionInfo) bool {
keys := [...]string{"", "aay", "bba", "bbf", "bbh", "bbj", "cca", "xxe", "xxz", ""}
return validateRegionsExt(regions, keys[:], false)
10 changes: 6 additions & 4 deletions br/pkg/restore/stream_metas.go
@@ -85,10 +85,12 @@ func (ms *StreamMetadataSet) CalculateShiftTS(startTS uint64) uint64 {
}

// IterateFilesFullyBefore runs the function over all files contain data before the timestamp only.
// 0 before
// |------------------------------------------|
// |-file1---------------| <- File contains records in this TS range would be found.
// |-file2--------------| <- File contains any record out of this won't be found.
//
// 0 before
// |------------------------------------------|
// |-file1---------------| <- File contains records in this TS range would be found.
// |-file2--------------| <- File contains any record out of this won't be found.
//
// This function would call the `f` over file1 only.
func (ms *StreamMetadataSet) IterateFilesFullyBefore(before uint64, f func(d *backuppb.DataFileInfo) (shouldBreak bool)) {
ms.iterateDataFiles(func(d *backuppb.DataFileInfo) (shouldBreak bool) {
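
Reading the reflowed diagram: a file qualifies only when its whole TS range lies before the cutoff, so file1 is visited and file2, which crosses the line, is skipped. A sketch of that predicate, assuming MinTs/MaxTs bounds on a stand-in for backuppb.DataFileInfo (the real field names may differ):

package sketch

// dataFile is a stand-in for backuppb.DataFileInfo; MinTs and MaxTs are the
// assumed bounds of the records the file contains.
type dataFile struct {
	MinTs, MaxTs uint64
}

// fullyBefore reports whether every record in f sits before the cutoff,
// matching the diagram: file1 qualifies, file2 does not.
func fullyBefore(f dataFile, before uint64) bool {
	return f.MaxTs < before
}
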
3 changes: 2 additions & 1 deletion br/pkg/stream/stream_status.go
@@ -255,7 +255,8 @@ type PDInfoProvider interface {

// MaybeQPS get a number like the QPS of last seconds for each store via the prometheus interface.
// TODO: this is a temporary solution(aha, like in a Hackthon),
// we MUST find a better way for providing this information.
//
// we MUST find a better way for providing this information.
func MaybeQPS(ctx context.Context, mgr PDInfoProvider) (float64, error) {
c := mgr.GetPDClient()
prefix := "http://"
36 changes: 19 additions & 17 deletions br/pkg/streamhelper/advancer.go
@@ -29,23 +29,25 @@ import (
// CheckpointAdvancer is the central node for advancing the checkpoint of log backup.
// It's a part of "checkpoint v3".
// Generally, it scan the regions in the task range, collect checkpoints from tikvs.
// ┌──────┐
// ┌────►│ TiKV │
// │ └──────┘
// │
// │
// ┌──────────┐GetLastFlushTSOfRegion│ ┌──────┐
// │ Advancer ├──────────────────────┼────►│ TiKV │
// └────┬─────┘ │ └──────┘
// │ │
// │ │
// │ │ ┌──────┐
// │ └────►│ TiKV │
// │ └──────┘
// │
// │ UploadCheckpointV3 ┌──────────────────┐
// └─────────────────────►│ PD │
// └──────────────────┘
/*
┌──────┐
┌────►│ TiKV │
│ └──────┘
┌──────────┐GetLastFlushTSOfRegion│ ┌──────┐
│ Advancer ├──────────────────────┼────►│ TiKV │
└────┬─────┘ │ └──────┘
│ │
│ │
│ │ ┌──────┐
│ └────►│ TiKV │
│ └──────┘
│ UploadCheckpointV3 ┌──────────────────┐
└─────────────────────►│ PD │
└──────────────────┘
*/
type CheckpointAdvancer struct {
env Env

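
For the large diagrams the PR takes a different route: instead of prefixing every art line with // and blank separators, it converts the whole comment to a /* */ block. gofmt 1.19 rewrites // doc comments into canonical form but leaves the interior of block comments alone, so the box-drawing art survives verbatim. The pattern, sketched:

package sketch

/*
pipeline is documented with a block comment so gofmt leaves the diagram alone:

	┌────────┐      ┌──────┐
	│ Source ├─────►│ Sink │
	└────────┘      └──────┘
*/
type pipeline struct{}
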
8 changes: 5 additions & 3 deletions br/pkg/streamhelper/advancer_daemon.go
@@ -26,9 +26,11 @@ const (
//
// ad := NewAdvancerDaemon(adv, mgr)
// loop, err := ad.Begin(ctx)
// if err != nil {
// return err
// }
//
// if err != nil {
// return err
// }
//
// loop()
type AdvancerDaemon struct {
adv *CheckpointAdvancer
28 changes: 15 additions & 13 deletions br/pkg/streamhelper/collector.go
@@ -221,19 +221,21 @@ type runningStoreCollector struct {

// clusterCollector is the controller for collecting region checkpoints for the cluster.
// It creates multi store collectors.
// ┌──────────────────────┐ Requesting ┌────────────┐
// ┌─►│ StoreCollector[id=1] ├─────────────►│ TiKV[id=1] │
// │ └──────────────────────┘ └────────────┘
// │
// │Owns
// ┌──────────────────┐ │ ┌──────────────────────┐ Requesting ┌────────────┐
// │ ClusterCollector ├─────┼─►│ StoreCollector[id=4] ├─────────────►│ TiKV[id=4] │
// └──────────────────┘ │ └──────────────────────┘ └────────────┘
// │
// │
// │ ┌──────────────────────┐ Requesting ┌────────────┐
// └─►│ StoreCollector[id=5] ├─────────────►│ TiKV[id=5] │
// └──────────────────────┘ └────────────┘
/*
┌──────────────────────┐ Requesting ┌────────────┐
┌─►│ StoreCollector[id=1] ├─────────────►│ TiKV[id=1] │
│ └──────────────────────┘ └────────────┘
│Owns
┌──────────────────┐ │ ┌──────────────────────┐ Requesting ┌────────────┐
│ ClusterCollector ├─────┼─►│ StoreCollector[id=4] ├─────────────►│ TiKV[id=4] │
└──────────────────┘ │ └──────────────────────┘ └────────────┘
│ ┌──────────────────────┐ Requesting ┌────────────┐
└─►│ StoreCollector[id=5] ├─────────────►│ TiKV[id=5] │
└──────────────────────┘ └────────────┘
*/
type clusterCollector struct {
mu sync.Mutex
collectors map[uint64]runningStoreCollector
11 changes: 6 additions & 5 deletions br/pkg/streamhelper/tsheap.go
@@ -30,11 +30,12 @@ import (
// - is based on range (it only promises there won't be new committed txns in the range).
// - the checkpoint of union of ranges is the minimal checkpoint of all ranges.
// As an example:
// +----------------------------------+
// ^-----------^ (Checkpoint = 42)
// ^---------------^ (Checkpoint = 76)
// ^-----------------------^ (Checkpoint = min(42, 76) = 42)
//
/*
+----------------------------------+
^-----------^ (Checkpoint = 42)
^---------------^ (Checkpoint = 76)
^-----------------------^ (Checkpoint = min(42, 76) = 42)
*/
// For calculating the global checkpoint, we can make a heap-like structure:
// Checkpoint Ranges
// 42 -> {[0, 8], [16, 100]}
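
The rule stated above — the checkpoint of a union of ranges is the minimum checkpoint over its members — is small enough to state in code. A sketch with illustrative types (not tsheap's actual ones):

package sketch

import "math"

// rangeCheckpoint pairs a key range with its checkpoint TS.
type rangeCheckpoint struct {
	start, end string
	checkpoint uint64
}

// unionCheckpoint returns the checkpoint of the union of ranges: the minimum
// checkpoint among them, matching the min(42, 76) = 42 example above.
func unionCheckpoint(ranges []rangeCheckpoint) uint64 {
	min := uint64(math.MaxUint64)
	for _, r := range ranges {
		if r.checkpoint < min {
			min = r.checkpoint
		}
	}
	return min
}
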
2 changes: 1 addition & 1 deletion br/pkg/task/stream.go
@@ -425,7 +425,7 @@ func (s *streamMgr) backupFullSchemas(ctx context.Context, g glue.Glue) error {
return nil
}

// RunStreamCommand run all kinds of `stream task``
// RunStreamCommand run all kinds of `stream task`
func RunStreamCommand(
ctx context.Context,
g glue.Glue,
14 changes: 7 additions & 7 deletions br/pkg/utils/worker.go
@@ -114,15 +114,15 @@ func (pool *WorkerPool) HasWorker() bool {
// PanicToErr recovers when the execution get panicked, and set the error provided by the arg.
// generally, this would be used with named return value and `defer`, like:
//
// func foo() (err error) {
// defer utils.PanicToErr(&err)
// return maybePanic()
// }
// func foo() (err error) {
// defer utils.PanicToErr(&err)
// return maybePanic()
// }
//
// Before using this, there are some hints for reducing resource leakage or bugs:
// - If any of clean work (by `defer`) relies on the error (say, when error happens, rollback some operations.), please
// place `defer this` AFTER that.
// - All resources allocated should be freed by the `defer` syntax, or when panicking, they may not be recycled.
// - If any of clean work (by `defer`) relies on the error (say, when error happens, rollback some operations.), please
// place `defer this` AFTER that.
// - All resources allocated should be freed by the `defer` syntax, or when panicking, they may not be recycled.
func PanicToErr(err *error) {
item := recover()
if item != nil {
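
The doc comment's usage example is the whole pattern: recover inside a deferred helper and write into a named return value. A runnable sketch of both sides (panicToErr here is a simplified stand-in; the real PanicToErr presumably wraps the panic value with br's error utilities):

package sketch

import "fmt"

// panicToErr converts a recovered panic into an error through a named
// return value; call it with defer, as in the doc comment's example.
func panicToErr(err *error) {
	if r := recover(); r != nil {
		*err = fmt.Errorf("panicked: %v", r)
	}
}

// mayPanic shows the usage: defer panicToErr, and any panic inside the
// function body becomes a returned error instead of crashing the caller.
func mayPanic() (err error) {
	defer panicToErr(&err)
	panic("boom")
}
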
1 change: 1 addition & 0 deletions br/tests/br_key_locked/locker.go
@@ -185,6 +185,7 @@ type Locker struct {
}

// generateLocks sends Prewrite requests to TiKV to generate locks, without committing and rolling back.
//
//nolint:gosec
func (c *Locker) generateLocks(pctx context.Context) error {
log.Info("genLock started")
1 change: 0 additions & 1 deletion build/linter/util/util.go
@@ -27,7 +27,6 @@ import (
"honnef.co/go/tools/analysis/report"
)

//
type skipType int

const (