From 49d21dd9a82e763854a97627bc0db72b5956a88a Mon Sep 17 00:00:00 2001 From: ZhuoZhi <517770911@qq.com> Date: Tue, 18 May 2021 15:25:40 +0800 Subject: [PATCH 01/10] executor: add correctness tests for partition table with different joins (#24673) --- executor/partition_table_test.go | 235 +++++++++++++++++++++++++++++++ 1 file changed, 235 insertions(+) diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index e87e1044278e3..8337be660ec0c 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/israce" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" ) func (s *partitionTableSuite) TestFourReader(c *C) { @@ -572,6 +573,240 @@ func (s *partitionTableSuite) TestGlobalStatsAndSQLBinding(c *C) { tk.MustIndexLookup("select * from tlist where a<1") } +func (s *partitionTableSuite) TestPartitionTableWithDifferentJoin(c *C) { + if israce.RaceEnabled { + c.Skip("exhaustive types test, skip race test") + } + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create database test_partition_joins") + tk.MustExec("use test_partition_joins") + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + + // hash and range partition + tk.MustExec("create table thash(a int, b int, key(a)) partition by hash(a) partitions 4") + tk.MustExec("create table tregular1(a int, b int, key(a))") + + tk.MustExec(`create table trange(a int, b int, key(a)) partition by range(a) ( + partition p0 values less than (200), + partition p1 values less than (400), + partition p2 values less than (600), + partition p3 values less than (800), + partition p4 values less than (1001))`) + tk.MustExec("create table tregular2(a int, b int, key(a))") + + vals := make([]string, 0, 2000) + for i := 0; i < 2000; i++ { + vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1000), rand.Intn(1000))) + } + tk.MustExec("insert into thash values " + strings.Join(vals, ",")) + tk.MustExec("insert into tregular1 values " + strings.Join(vals, ",")) + + vals = make([]string, 0, 2000) + for i := 0; i < 2000; i++ { + vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1000), rand.Intn(1000))) + } + tk.MustExec("insert into trange values " + strings.Join(vals, ",")) + tk.MustExec("insert into tregular2 values " + strings.Join(vals, ",")) + + // random params + x1 := rand.Intn(1000) + x2 := rand.Intn(1000) + x3 := rand.Intn(1000) + x4 := rand.Intn(1000) + + // group 1 + // hash_join range partition and hash partition + queryHash := fmt.Sprintf("select /*+ hash_join(trange, thash) */ * from trange, thash where trange.b=thash.b and thash.a = %v and trange.a > %v;", x1, x2) + queryRegular := fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.b=tregular1.b and tregular1.a = %v and tregular2.a > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ hash_join(trange, thash) */ * from trange, thash where trange.a=thash.a and thash.a > %v;", x1) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.a > %v;", x1) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select 
/*+ hash_join(trange, thash) */ * from trange, thash where trange.a=thash.a and trange.b = thash.b and thash.a > %v;", x1) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.b = tregular2.b and tregular1.a > %v;", x1) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ hash_join(trange, thash) */ * from trange, thash where trange.a=thash.a and thash.a = %v;", x1) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.a = %v;", x1) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // group 2 + // hash_join range partition and regular table + queryHash = fmt.Sprintf("select /*+ hash_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and trange.a >= %v and tregular1.a > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular2.a >= %v and tregular1.a > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ hash_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and trange.a in (%v, %v, %v);", x1, x2, x3) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular2.a in (%v, %v, %v);", x1, x2, x3) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ hash_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and tregular1.a >= %v;", x1) + queryRegular = fmt.Sprintf("select /*+ hash_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular1.a >= %v;", x1) + c.Assert(tk.HasPlan(queryHash, "HashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // group 3 + // merge_join range partition and hash partition + queryHash = fmt.Sprintf("select /*+ merge_join(trange, thash) */ * from trange, thash where trange.b=thash.b and thash.a = %v and trange.a > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.b=tregular1.b and tregular1.a = %v and tregular2.a > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ merge_join(trange, thash) */ * from trange, thash where trange.a=thash.a and thash.a > %v;", x1) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.a > %v;", x1) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ merge_join(trange, thash) */ * from trange, thash where trange.a=thash.a and trange.b = thash.b and thash.a > %v;", x1) + queryRegular = 
fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.b = tregular2.b and tregular1.a > %v;", x1) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ merge_join(trange, thash) */ * from trange, thash where trange.a=thash.a and thash.a = %v;", x1) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a=tregular1.a and tregular1.a = %v;", x1) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // group 4 + // merge_join range partition and regular table + queryHash = fmt.Sprintf("select /*+ merge_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and trange.a >= %v and tregular1.a > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular2.a >= %v and tregular1.a > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ merge_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and trange.a in (%v, %v, %v);", x1, x2, x3) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular2.a in (%v, %v, %v);", x1, x2, x3) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ merge_join(trange, tregular1) */ * from trange, tregular1 where trange.a = tregular1.a and tregular1.a >= %v;", x1) + queryRegular = fmt.Sprintf("select /*+ merge_join(tregular2, tregular1) */ * from tregular2, tregular1 where tregular2.a = tregular1.a and tregular1.a >= %v;", x1) + c.Assert(tk.HasPlan(queryHash, "MergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // new table instances + tk.MustExec("create table thash2(a int, b int, index idx(a)) partition by hash(a) partitions 4") + tk.MustExec("create table tregular3(a int, b int, index idx(a))") + + tk.MustExec(`create table trange2(a int, b int, index idx(a)) partition by range(a) ( + partition p0 values less than (200), + partition p1 values less than (400), + partition p2 values less than (600), + partition p3 values less than (800), + partition p4 values less than (1001))`) + tk.MustExec("create table tregular4(a int, b int, index idx(a))") + + vals = make([]string, 0, 2000) + for i := 0; i < 2000; i++ { + vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1000), rand.Intn(1000))) + } + tk.MustExec("insert into thash2 values " + strings.Join(vals, ",")) + tk.MustExec("insert into tregular3 values " + strings.Join(vals, ",")) + + vals = make([]string, 0, 2000) + for i := 0; i < 2000; i++ { + vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1000), rand.Intn(1000))) + } + tk.MustExec("insert into trange2 values " + strings.Join(vals, ",")) + tk.MustExec("insert into tregular4 values " + strings.Join(vals, ",")) + + // group 5 + // index_merge_join range partition and range partition + // Currently don't support index merge join on two partition tables. 
Only test warning. + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, trange2) */ * from trange, trange2 where trange.a=trange2.a and trange.a > %v;", x1) + // queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v;", x1) + // c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + // tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + tk.MustQuery(queryHash) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1815|Optimizer Hint /*+ INL_MERGE_JOIN(trange, trange2) */ is inapplicable")) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, trange2) */ * from trange, trange2 where trange.a=trange2.a and trange.a > %v and trange2.a > %v;", x1, x2) + // queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v and tregular4.a > %v;", x1, x2) + // c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + // tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + tk.MustQuery(queryHash) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1815|Optimizer Hint /*+ INL_MERGE_JOIN(trange, trange2) */ is inapplicable")) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, trange2) */ * from trange, trange2 where trange.a=trange2.a and trange.a > %v and trange.b > %v;", x1, x2) + // queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v and tregular2.b > %v;", x1, x2) + // c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + // tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + tk.MustQuery(queryHash) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1815|Optimizer Hint /*+ INL_MERGE_JOIN(trange, trange2) */ is inapplicable")) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, trange2) */ * from trange, trange2 where trange.a=trange2.a and trange.a > %v and trange2.b > %v;", x1, x2) + // queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v and tregular4.b > %v;", x1, x2) + // c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + // tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + tk.MustQuery(queryHash) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1815|Optimizer Hint /*+ INL_MERGE_JOIN(trange, trange2) */ is inapplicable")) + + // group 6 + // index_merge_join range partition and regular table + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, tregular4) */ * from trange, tregular4 where trange.a=tregular4.a and trange.a > %v;", x1) + queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v;", x1) + c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, tregular4) */ * from trange, tregular4 where trange.a=tregular4.a and trange.a > %v and tregular4.a > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where 
tregular2.a=tregular4.a and tregular2.a > %v and tregular4.a > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, tregular4) */ * from trange, tregular4 where trange.a=tregular4.a and trange.a > %v and trange.b > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v and tregular2.b > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_merge_join(trange, tregular4) */ * from trange, tregular4 where trange.a=tregular4.a and trange.a > %v and tregular4.b > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_merge_join(tregular2, tregular4) */ * from tregular2, tregular4 where tregular2.a=tregular4.a and tregular2.a > %v and tregular4.b > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexMergeJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // group 7 + // index_hash_join hash partition and hash partition + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, thash2) */ * from thash, thash2 where thash.a = thash2.a and thash.a in (%v, %v);", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a in (%v, %v);", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, thash2) */ * from thash, thash2 where thash.a = thash2.a and thash.a in (%v, %v) and thash2.a in (%v, %v);", x1, x2, x3, x4) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a in (%v, %v) and tregular3.a in (%v, %v);", x1, x2, x3, x4) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, thash2) */ * from thash, thash2 where thash.a = thash2.a and thash.a > %v and thash2.b > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a > %v and tregular3.b > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + // group 8 + // index_hash_join hash partition and regular table + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, tregular3) */ * from thash, tregular3 where thash.a = tregular3.a and thash.a in (%v, %v);", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a in (%v, %v);", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, tregular3) */ * from thash, tregular3 where thash.a = tregular3.a and thash.a in (%v, %v) and tregular3.a in (%v, %v);", x1, 
x2, x3, x4) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a in (%v, %v) and tregular3.a in (%v, %v);", x1, x2, x3, x4) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) + + queryHash = fmt.Sprintf("select /*+ inl_hash_join(thash, tregular3) */ * from thash, tregular3 where thash.a = tregular3.a and thash.a > %v and tregular3.b > %v;", x1, x2) + queryRegular = fmt.Sprintf("select /*+ inl_hash_join(tregular1, tregular3) */ * from tregular1, tregular3 where tregular1.a = tregular3.a and tregular1.a > %v and tregular3.b > %v;", x1, x2) + c.Assert(tk.HasPlan(queryHash, "IndexHashJoin"), IsTrue) + tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) +} + func createTable4DynamicPruneModeTestWithExpression(tk *testkit.TestKit) { tk.MustExec("create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));") tk.MustExec("create table thash(a int, b int) partition by hash(a) partitions 4;") From 5fd17dd7f4cef3f2e103f12427cb9197d9838b73 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Tue, 18 May 2021 16:27:40 +0800 Subject: [PATCH 02/10] expression: fix the spelling of word arithmetical (#24713) --- expression/builtin_time.go | 238 ++++++++++++++++---------------- expression/builtin_time_test.go | 2 +- 2 files changed, 120 insertions(+), 120 deletions(-) diff --git a/expression/builtin_time.go b/expression/builtin_time.go index 13b3d1eef3def..67413dab11374 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -2769,20 +2769,20 @@ func (b *builtinExtractDurationSig) evalInt(row chunk.Row) (int64, bool, error) return res, err != nil, err } -// baseDateArithmitical is the base class for all "builtinAddDateXXXSig" and "builtinSubDateXXXSig", +// baseDateArithmetical is the base class for all "builtinAddDateXXXSig" and "builtinSubDateXXXSig", // which provides parameter getter and date arithmetical calculate functions. -type baseDateArithmitical struct { +type baseDateArithmetical struct { // intervalRegexp is "*Regexp" used to extract string interval for "DAY" unit. 
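// (Editor's note, illustrative and not part of the original patch: the pattern
// compiled for this field below, `-?[\d]+`, matches an optional minus sign
// followed by digits, so its leftmost match in an interval string such as
// "-2 day" is "-2".)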
intervalRegexp *regexp.Regexp } -func newDateArighmeticalUtil() baseDateArithmitical { - return baseDateArithmitical{ +func newDateArighmeticalUtil() baseDateArithmetical { + return baseDateArithmetical{ intervalRegexp: regexp.MustCompile(`-?[\d]+`), } } -func (du *baseDateArithmitical) getDateFromString(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { +func (du *baseDateArithmetical) getDateFromString(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { dateStr, isNull, err := args[0].EvalString(ctx, row) if isNull || err != nil { return types.ZeroTime, true, err @@ -2807,7 +2807,7 @@ func (du *baseDateArithmitical) getDateFromString(ctx sessionctx.Context, args [ return date, false, handleInvalidTimeError(ctx, err) } -func (du *baseDateArithmitical) getDateFromInt(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { +func (du *baseDateArithmetical) getDateFromInt(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { dateInt, isNull, err := args[0].EvalInt(ctx, row) if isNull || err != nil { return types.ZeroTime, true, err @@ -2827,7 +2827,7 @@ func (du *baseDateArithmitical) getDateFromInt(ctx sessionctx.Context, args []Ex return date, false, nil } -func (du *baseDateArithmitical) getDateFromDatetime(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { +func (du *baseDateArithmetical) getDateFromDatetime(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (types.Time, bool, error) { date, isNull, err := args[0].EvalTime(ctx, row) if isNull || err != nil { return types.ZeroTime, true, err @@ -2839,7 +2839,7 @@ func (du *baseDateArithmitical) getDateFromDatetime(ctx sessionctx.Context, args return date, false, nil } -func (du *baseDateArithmitical) getIntervalFromString(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { +func (du *baseDateArithmetical) getIntervalFromString(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { interval, isNull, err := args[1].EvalString(ctx, row) if isNull || err != nil { return "", true, err @@ -2857,7 +2857,7 @@ func (du *baseDateArithmitical) getIntervalFromString(ctx sessionctx.Context, ar return interval, false, nil } -func (du *baseDateArithmitical) getIntervalFromDecimal(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { +func (du *baseDateArithmetical) getIntervalFromDecimal(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { decimal, isNull, err := args[1].EvalDecimal(ctx, row) if isNull || err != nil { return "", true, err @@ -2911,7 +2911,7 @@ func (du *baseDateArithmitical) getIntervalFromDecimal(ctx sessionctx.Context, a return interval, false, nil } -func (du *baseDateArithmitical) getIntervalFromInt(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { +func (du *baseDateArithmetical) getIntervalFromInt(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { interval, isNull, err := args[1].EvalInt(ctx, row) if isNull || err != nil { return "", true, err @@ -2919,7 +2919,7 @@ func (du *baseDateArithmitical) getIntervalFromInt(ctx sessionctx.Context, args return strconv.FormatInt(interval, 10), false, nil } -func (du *baseDateArithmitical) 
getIntervalFromReal(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { +func (du *baseDateArithmetical) getIntervalFromReal(ctx sessionctx.Context, args []Expression, row chunk.Row, unit string) (string, bool, error) { interval, isNull, err := args[1].EvalReal(ctx, row) if isNull || err != nil { return "", true, err @@ -2927,7 +2927,7 @@ func (du *baseDateArithmitical) getIntervalFromReal(ctx sessionctx.Context, args return strconv.FormatFloat(interval, 'f', args[1].GetType().Decimal, 64), false, nil } -func (du *baseDateArithmitical) add(ctx sessionctx.Context, date types.Time, interval string, unit string) (types.Time, bool, error) { +func (du *baseDateArithmetical) add(ctx sessionctx.Context, date types.Time, interval string, unit string) (types.Time, bool, error) { year, month, day, nano, err := types.ParseDurationValue(unit, interval) if err := handleInvalidTimeError(ctx, err); err != nil { return types.ZeroTime, true, err @@ -2935,7 +2935,7 @@ func (du *baseDateArithmitical) add(ctx sessionctx.Context, date types.Time, int return du.addDate(ctx, date, year, month, day, nano) } -func (du *baseDateArithmitical) addDate(ctx sessionctx.Context, date types.Time, year, month, day, nano int64) (types.Time, bool, error) { +func (du *baseDateArithmetical) addDate(ctx sessionctx.Context, date types.Time, year, month, day, nano int64) (types.Time, bool, error) { goTime, err := date.GoTime(time.UTC) if err := handleInvalidTimeError(ctx, err); err != nil { return types.ZeroTime, true, err @@ -2972,7 +2972,7 @@ func (du *baseDateArithmitical) addDate(ctx sessionctx.Context, date types.Time, return date, false, nil } -func (du *baseDateArithmitical) addDuration(ctx sessionctx.Context, d types.Duration, interval string, unit string) (types.Duration, bool, error) { +func (du *baseDateArithmetical) addDuration(ctx sessionctx.Context, d types.Duration, interval string, unit string) (types.Duration, bool, error) { dur, err := types.ExtractDurationValue(unit, interval) if err != nil { return types.ZeroDuration, true, handleInvalidTimeError(ctx, err) @@ -2984,7 +2984,7 @@ func (du *baseDateArithmitical) addDuration(ctx sessionctx.Context, d types.Dura return retDur, false, nil } -func (du *baseDateArithmitical) subDuration(ctx sessionctx.Context, d types.Duration, interval string, unit string) (types.Duration, bool, error) { +func (du *baseDateArithmetical) subDuration(ctx sessionctx.Context, d types.Duration, interval string, unit string) (types.Duration, bool, error) { dur, err := types.ExtractDurationValue(unit, interval) if err != nil { return types.ZeroDuration, true, handleInvalidTimeError(ctx, err) @@ -2996,7 +2996,7 @@ func (du *baseDateArithmitical) subDuration(ctx sessionctx.Context, d types.Dura return retDur, false, nil } -func (du *baseDateArithmitical) sub(ctx sessionctx.Context, date types.Time, interval string, unit string) (types.Time, bool, error) { +func (du *baseDateArithmetical) sub(ctx sessionctx.Context, date types.Time, interval string, unit string) (types.Time, bool, error) { year, month, day, nano, err := types.ParseDurationValue(unit, interval) if err := handleInvalidTimeError(ctx, err); err != nil { return types.ZeroTime, true, err @@ -3004,7 +3004,7 @@ func (du *baseDateArithmitical) sub(ctx sessionctx.Context, date types.Time, int return du.addDate(ctx, date, -year, -month, -day, -nano) } -func (du *baseDateArithmitical) vecGetDateFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du 
*baseDateArithmetical) vecGetDateFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETInt, n) if err != nil { @@ -3046,7 +3046,7 @@ func (du *baseDateArithmitical) vecGetDateFromInt(b *baseBuiltinFunc, input *chu return nil } -func (du *baseDateArithmitical) vecGetDateFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetDateFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETString, n) if err != nil { @@ -3090,7 +3090,7 @@ func (du *baseDateArithmitical) vecGetDateFromString(b *baseBuiltinFunc, input * return nil } -func (du *baseDateArithmitical) vecGetDateFromDatetime(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetDateFromDatetime(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() result.ResizeTime(n, false) if err := b.args[0].VecEvalTime(b.ctx, input, result); err != nil { @@ -3111,7 +3111,7 @@ func (du *baseDateArithmitical) vecGetDateFromDatetime(b *baseBuiltinFunc, input return nil } -func (du *baseDateArithmitical) vecGetIntervalFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetIntervalFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETString, n) if err != nil { @@ -3148,7 +3148,7 @@ func (du *baseDateArithmitical) vecGetIntervalFromString(b *baseBuiltinFunc, inp return nil } -func (du *baseDateArithmitical) vecGetIntervalFromDecimal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetIntervalFromDecimal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETDecimal, n) if err != nil { @@ -3249,7 +3249,7 @@ func (du *baseDateArithmitical) vecGetIntervalFromDecimal(b *baseBuiltinFunc, in return nil } -func (du *baseDateArithmitical) vecGetIntervalFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetIntervalFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETInt, n) if err != nil { @@ -3272,7 +3272,7 @@ func (du *baseDateArithmitical) vecGetIntervalFromInt(b *baseBuiltinFunc, input return nil } -func (du *baseDateArithmitical) vecGetIntervalFromReal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { +func (du *baseDateArithmetical) vecGetIntervalFromReal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { n := input.NumRows() buf, err := b.bufAllocator.get(types.ETReal, n) if err != nil { @@ -3356,97 +3356,97 @@ func (c *addDateFunctionClass) getFunction(ctx sessionctx.Context, args []Expres case dateEvalTp == types.ETString && intervalEvalTp == types.ETString: sig = &builtinAddDateStringStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateStringString) case dateEvalTp == types.ETString && 
intervalEvalTp == types.ETInt: sig = &builtinAddDateStringIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateStringInt) case dateEvalTp == types.ETString && intervalEvalTp == types.ETReal: sig = &builtinAddDateStringRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateStringReal) case dateEvalTp == types.ETString && intervalEvalTp == types.ETDecimal: sig = &builtinAddDateStringDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateStringDecimal) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETString: sig = &builtinAddDateIntStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateIntString) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETInt: sig = &builtinAddDateIntIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateIntInt) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETReal: sig = &builtinAddDateIntRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateIntReal) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETDecimal: sig = &builtinAddDateIntDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateIntDecimal) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETString: sig = &builtinAddDateDatetimeStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDatetimeString) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETInt: sig = &builtinAddDateDatetimeIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDatetimeInt) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETReal: sig = &builtinAddDateDatetimeRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDatetimeReal) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETDecimal: sig = &builtinAddDateDatetimeDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDatetimeDecimal) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETString: sig = &builtinAddDateDurationStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDurationString) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETInt: sig = &builtinAddDateDurationIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: 
newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDurationInt) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETReal: sig = &builtinAddDateDurationRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDurationReal) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETDecimal: sig = &builtinAddDateDurationDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_AddDateDurationDecimal) } @@ -3455,11 +3455,11 @@ func (c *addDateFunctionClass) getFunction(ctx sessionctx.Context, args []Expres type builtinAddDateStringStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateStringStringSig) Clone() builtinFunc { - newSig := &builtinAddDateStringStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateStringStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3488,11 +3488,11 @@ func (b *builtinAddDateStringStringSig) evalTime(row chunk.Row) (types.Time, boo type builtinAddDateStringIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateStringIntSig) Clone() builtinFunc { - newSig := &builtinAddDateStringIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateStringIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3521,11 +3521,11 @@ func (b *builtinAddDateStringIntSig) evalTime(row chunk.Row) (types.Time, bool, type builtinAddDateStringRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateStringRealSig) Clone() builtinFunc { - newSig := &builtinAddDateStringRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateStringRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3554,11 +3554,11 @@ func (b *builtinAddDateStringRealSig) evalTime(row chunk.Row) (types.Time, bool, type builtinAddDateStringDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateStringDecimalSig) Clone() builtinFunc { - newSig := &builtinAddDateStringDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateStringDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3587,11 +3587,11 @@ func (b *builtinAddDateStringDecimalSig) evalTime(row chunk.Row) (types.Time, bo type builtinAddDateIntStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateIntStringSig) Clone() builtinFunc { - newSig := &builtinAddDateIntStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateIntStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3620,11 +3620,11 @@ func (b *builtinAddDateIntStringSig) evalTime(row chunk.Row) (types.Time, bool, type builtinAddDateIntIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateIntIntSig) Clone() builtinFunc { - newSig := &builtinAddDateIntIntSig{baseDateArithmitical: 
b.baseDateArithmitical} + newSig := &builtinAddDateIntIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3653,11 +3653,11 @@ func (b *builtinAddDateIntIntSig) evalTime(row chunk.Row) (types.Time, bool, err type builtinAddDateIntRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateIntRealSig) Clone() builtinFunc { - newSig := &builtinAddDateIntRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateIntRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3686,11 +3686,11 @@ func (b *builtinAddDateIntRealSig) evalTime(row chunk.Row) (types.Time, bool, er type builtinAddDateIntDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateIntDecimalSig) Clone() builtinFunc { - newSig := &builtinAddDateIntDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateIntDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3719,11 +3719,11 @@ func (b *builtinAddDateIntDecimalSig) evalTime(row chunk.Row) (types.Time, bool, type builtinAddDateDatetimeStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDatetimeStringSig) Clone() builtinFunc { - newSig := &builtinAddDateDatetimeStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDatetimeStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3752,11 +3752,11 @@ func (b *builtinAddDateDatetimeStringSig) evalTime(row chunk.Row) (types.Time, b type builtinAddDateDatetimeIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDatetimeIntSig) Clone() builtinFunc { - newSig := &builtinAddDateDatetimeIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDatetimeIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3785,11 +3785,11 @@ func (b *builtinAddDateDatetimeIntSig) evalTime(row chunk.Row) (types.Time, bool type builtinAddDateDatetimeRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDatetimeRealSig) Clone() builtinFunc { - newSig := &builtinAddDateDatetimeRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDatetimeRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3818,11 +3818,11 @@ func (b *builtinAddDateDatetimeRealSig) evalTime(row chunk.Row) (types.Time, boo type builtinAddDateDatetimeDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDatetimeDecimalSig) Clone() builtinFunc { - newSig := &builtinAddDateDatetimeDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDatetimeDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3851,11 +3851,11 @@ func (b *builtinAddDateDatetimeDecimalSig) evalTime(row chunk.Row) (types.Time, type builtinAddDateDurationStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDurationStringSig) Clone() builtinFunc { - newSig := &builtinAddDateDurationStringSig{baseDateArithmitical: b.baseDateArithmitical} + 
newSig := &builtinAddDateDurationStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3882,11 +3882,11 @@ func (b *builtinAddDateDurationStringSig) evalDuration(row chunk.Row) (types.Dur type builtinAddDateDurationIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDurationIntSig) Clone() builtinFunc { - newSig := &builtinAddDateDurationIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDurationIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3912,11 +3912,11 @@ func (b *builtinAddDateDurationIntSig) evalDuration(row chunk.Row) (types.Durati type builtinAddDateDurationDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDurationDecimalSig) Clone() builtinFunc { - newSig := &builtinAddDateDurationDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDurationDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -3942,11 +3942,11 @@ func (b *builtinAddDateDurationDecimalSig) evalDuration(row chunk.Row) (types.Du type builtinAddDateDurationRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinAddDateDurationRealSig) Clone() builtinFunc { - newSig := &builtinAddDateDurationRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinAddDateDurationRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4030,97 +4030,97 @@ func (c *subDateFunctionClass) getFunction(ctx sessionctx.Context, args []Expres case dateEvalTp == types.ETString && intervalEvalTp == types.ETString: sig = &builtinSubDateStringStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateStringString) case dateEvalTp == types.ETString && intervalEvalTp == types.ETInt: sig = &builtinSubDateStringIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateStringInt) case dateEvalTp == types.ETString && intervalEvalTp == types.ETReal: sig = &builtinSubDateStringRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateStringReal) case dateEvalTp == types.ETString && intervalEvalTp == types.ETDecimal: sig = &builtinSubDateStringDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateStringDecimal) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETString: sig = &builtinSubDateIntStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateIntString) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETInt: sig = &builtinSubDateIntIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateIntInt) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETReal: sig = 
&builtinSubDateIntRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateIntReal) case dateEvalTp == types.ETInt && intervalEvalTp == types.ETDecimal: sig = &builtinSubDateIntDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateIntDecimal) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETString: sig = &builtinSubDateDatetimeStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDatetimeString) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETInt: sig = &builtinSubDateDatetimeIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDatetimeInt) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETReal: sig = &builtinSubDateDatetimeRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDatetimeReal) case dateEvalTp == types.ETDatetime && intervalEvalTp == types.ETDecimal: sig = &builtinSubDateDatetimeDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDatetimeDecimal) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETString: sig = &builtinSubDateDurationStringSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDurationString) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETInt: sig = &builtinSubDateDurationIntSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDurationInt) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETReal: sig = &builtinSubDateDurationRealSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDurationReal) case dateEvalTp == types.ETDuration && intervalEvalTp == types.ETDecimal: sig = &builtinSubDateDurationDecimalSig{ baseBuiltinFunc: bf, - baseDateArithmitical: newDateArighmeticalUtil(), + baseDateArithmetical: newDateArighmeticalUtil(), } sig.setPbCode(tipb.ScalarFuncSig_SubDateDurationDecimal) } @@ -4129,11 +4129,11 @@ func (c *subDateFunctionClass) getFunction(ctx sessionctx.Context, args []Expres type builtinSubDateStringStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateStringStringSig) Clone() builtinFunc { - newSig := &builtinSubDateStringStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateStringStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4162,11 +4162,11 @@ func (b *builtinSubDateStringStringSig) evalTime(row chunk.Row) (types.Time, boo type builtinSubDateStringIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b 
*builtinSubDateStringIntSig) Clone() builtinFunc { - newSig := &builtinSubDateStringIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateStringIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4195,11 +4195,11 @@ func (b *builtinSubDateStringIntSig) evalTime(row chunk.Row) (types.Time, bool, type builtinSubDateStringRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateStringRealSig) Clone() builtinFunc { - newSig := &builtinSubDateStringRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateStringRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4228,11 +4228,11 @@ func (b *builtinSubDateStringRealSig) evalTime(row chunk.Row) (types.Time, bool, type builtinSubDateStringDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateStringDecimalSig) Clone() builtinFunc { - newSig := &builtinSubDateStringDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateStringDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4259,11 +4259,11 @@ func (b *builtinSubDateStringDecimalSig) evalTime(row chunk.Row) (types.Time, bo type builtinSubDateIntStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateIntStringSig) Clone() builtinFunc { - newSig := &builtinSubDateIntStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateIntStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4292,11 +4292,11 @@ func (b *builtinSubDateIntStringSig) evalTime(row chunk.Row) (types.Time, bool, type builtinSubDateIntIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateIntIntSig) Clone() builtinFunc { - newSig := &builtinSubDateIntIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateIntIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4325,11 +4325,11 @@ func (b *builtinSubDateIntIntSig) evalTime(row chunk.Row) (types.Time, bool, err type builtinSubDateIntRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateIntRealSig) Clone() builtinFunc { - newSig := &builtinSubDateIntRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateIntRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4358,16 +4358,16 @@ func (b *builtinSubDateIntRealSig) evalTime(row chunk.Row) (types.Time, bool, er type builtinSubDateDatetimeStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } type builtinSubDateIntDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateIntDecimalSig) Clone() builtinFunc { - newSig := &builtinSubDateIntDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateIntDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4395,7 +4395,7 @@ func (b *builtinSubDateIntDecimalSig) evalTime(row chunk.Row) (types.Time, bool, } func (b *builtinSubDateDatetimeStringSig) Clone() builtinFunc { - newSig := 
&builtinSubDateDatetimeStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDatetimeStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4424,11 +4424,11 @@ func (b *builtinSubDateDatetimeStringSig) evalTime(row chunk.Row) (types.Time, b type builtinSubDateDatetimeIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDatetimeIntSig) Clone() builtinFunc { - newSig := &builtinSubDateDatetimeIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDatetimeIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4457,11 +4457,11 @@ func (b *builtinSubDateDatetimeIntSig) evalTime(row chunk.Row) (types.Time, bool type builtinSubDateDatetimeRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDatetimeRealSig) Clone() builtinFunc { - newSig := &builtinSubDateDatetimeRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDatetimeRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4490,11 +4490,11 @@ func (b *builtinSubDateDatetimeRealSig) evalTime(row chunk.Row) (types.Time, boo type builtinSubDateDatetimeDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDatetimeDecimalSig) Clone() builtinFunc { - newSig := &builtinSubDateDatetimeDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDatetimeDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4523,11 +4523,11 @@ func (b *builtinSubDateDatetimeDecimalSig) evalTime(row chunk.Row) (types.Time, type builtinSubDateDurationStringSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDurationStringSig) Clone() builtinFunc { - newSig := &builtinSubDateDurationStringSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDurationStringSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4554,11 +4554,11 @@ func (b *builtinSubDateDurationStringSig) evalDuration(row chunk.Row) (types.Dur type builtinSubDateDurationIntSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDurationIntSig) Clone() builtinFunc { - newSig := &builtinSubDateDurationIntSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDurationIntSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4585,11 +4585,11 @@ func (b *builtinSubDateDurationIntSig) evalDuration(row chunk.Row) (types.Durati type builtinSubDateDurationDecimalSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDurationDecimalSig) Clone() builtinFunc { - newSig := &builtinSubDateDurationDecimalSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDurationDecimalSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } @@ -4616,11 +4616,11 @@ func (b *builtinSubDateDurationDecimalSig) evalDuration(row chunk.Row) (types.Du type builtinSubDateDurationRealSig struct { baseBuiltinFunc - baseDateArithmitical + baseDateArithmetical } func (b *builtinSubDateDurationRealSig) Clone() builtinFunc 
{ - newSig := &builtinSubDateDurationRealSig{baseDateArithmitical: b.baseDateArithmitical} + newSig := &builtinSubDateDurationRealSig{baseDateArithmetical: b.baseDateArithmetical} newSig.cloneFrom(&b.baseBuiltinFunc) return newSig } diff --git a/expression/builtin_time_test.go b/expression/builtin_time_test.go index 161912b07e973..e247e8756ae9a 100644 --- a/expression/builtin_time_test.go +++ b/expression/builtin_time_test.go @@ -2955,7 +2955,7 @@ func (s *testEvaluatorSuite) TestTiDBBoundedStaleness(c *C) { } func (s *testEvaluatorSuite) TestGetIntervalFromDecimal(c *C) { - du := baseDateArithmitical{} + du := baseDateArithmetical{} tests := []struct { param string From 66c8cd96b0eadd95cebe4bab37cd798422496fdd Mon Sep 17 00:00:00 2001 From: xufei Date: Tue, 18 May 2021 18:21:40 +0800 Subject: [PATCH 03/10] store/copr: balance region for batch cop task (#24521) --- store/copr/batch_coprocessor.go | 204 +++++++++++++++++-- store/copr/mpp.go | 14 +- store/{copr => tikv}/batch_request_sender.go | 54 ++--- store/tikv/region_cache.go | 101 +++++++++ 4 files changed, 321 insertions(+), 52 deletions(-) rename store/{copr => tikv}/batch_request_sender.go (54%) diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index b0c0ad5c9ea7b..8c73f58fbf892 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -16,6 +16,8 @@ package copr import ( "context" "io" + "math" + "strconv" "sync" "sync/atomic" "time" @@ -25,6 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/log" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/driver/backoff" derr "github.com/pingcap/tidb/store/driver/error" @@ -40,8 +43,9 @@ import ( type batchCopTask struct { storeAddr string cmdType tikvrpc.CmdType + ctx *tikv.RPCContext - copTasks []copTaskAndRPCContext + regionInfos []tikv.RegionInfo } type batchCopResponse struct { @@ -93,9 +97,152 @@ func (rs *batchCopResponse) RespTime() time.Duration { return rs.respTime } -type copTaskAndRPCContext struct { - task *copTask - ctx *tikv.RPCContext +// balanceBatchCopTask balance the regions between available stores, the basic rule is +// 1. the first region of each original batch cop task belongs to its original store because some +// meta data(like the rpc context) in batchCopTask is related to it +// 2. 
for the remaining regions:
+//    if there is only 1 available store, then assign the region to that store
+//    otherwise, use a greedy algorithm to assign it to the valid store with the smallest weighted load
+func balanceBatchCopTask(originalTasks []*batchCopTask) []*batchCopTask {
+	if len(originalTasks) <= 1 {
+		return originalTasks
+	}
+	storeTaskMap := make(map[uint64]*batchCopTask)
+	storeCandidateRegionMap := make(map[uint64]map[string]tikv.RegionInfo)
+	totalRegionCandidateNum := 0
+	totalRemainingRegionNum := 0
+
+	for _, task := range originalTasks {
+		taskStoreID := task.regionInfos[0].AllStores[0]
+		batchTask := &batchCopTask{
+			storeAddr:   task.storeAddr,
+			cmdType:     task.cmdType,
+			ctx:         task.ctx,
+			regionInfos: []tikv.RegionInfo{task.regionInfos[0]},
+		}
+		storeTaskMap[taskStoreID] = batchTask
+	}
+
+	for _, task := range originalTasks {
+		taskStoreID := task.regionInfos[0].AllStores[0]
+		for index, ri := range task.regionInfos {
+			// for each region, figure out the valid store num
+			validStoreNum := 0
+			if index == 0 {
+				continue
+			}
+			if len(ri.AllStores) <= 1 {
+				validStoreNum = 1
+			} else {
+				for _, storeID := range ri.AllStores {
+					if _, ok := storeTaskMap[storeID]; ok {
+						validStoreNum++
+					}
+				}
+			}
+			if validStoreNum == 1 {
+				// if only one store is valid, just put it into storeTaskMap
+				storeTaskMap[taskStoreID].regionInfos = append(storeTaskMap[taskStoreID].regionInfos, ri)
+			} else {
+				// if more than one store is valid, put the region
+				// into the store candidate map
+				totalRegionCandidateNum += validStoreNum
+				totalRemainingRegionNum += 1
+				taskKey := ri.Region.String()
+				for _, storeID := range ri.AllStores {
+					if _, validStore := storeTaskMap[storeID]; !validStore {
+						continue
+					}
+					if _, ok := storeCandidateRegionMap[storeID]; !ok {
+						candidateMap := make(map[string]tikv.RegionInfo)
+						storeCandidateRegionMap[storeID] = candidateMap
+					}
+					if _, duplicateRegion := storeCandidateRegionMap[storeID][taskKey]; duplicateRegion {
+						// a duplicated region should not happen, just give up balancing
+						logutil.BgLogger().Warn("Met duplicated region info when trying to balance batch cop task, give up balancing")
+						return originalTasks
+					}
+					storeCandidateRegionMap[storeID][taskKey] = ri
+				}
+			}
+		}
+	}
+	if totalRemainingRegionNum == 0 {
+		return originalTasks
+	}
+
+	avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+	findNextStore := func(candidateStores []uint64) uint64 {
+		store := uint64(math.MaxUint64)
+		weightedRegionNum := math.MaxFloat64
+		if candidateStores != nil {
+			for _, storeID := range candidateStores {
+				if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
+					continue
+				}
+				num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
+				if num < weightedRegionNum {
+					store = storeID
+					weightedRegionNum = num
+				}
+			}
+			if store != uint64(math.MaxUint64) {
+				return store
+			}
+		}
+		for storeID := range storeTaskMap {
+			if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
+				continue
+			}
+			num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
+			if num < weightedRegionNum {
+				store = storeID
+				weightedRegionNum = num
+			}
+		}
+		return store
+	}
+
+	store := findNextStore(nil)
+	for totalRemainingRegionNum > 0 {
+		if store == uint64(math.MaxUint64) {
+			break
+		}
+		var key string
+		var ri tikv.RegionInfo
+		for key, ri = range storeCandidateRegionMap[store] {
+			// get the first region
+			break
+		}
+		storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+		totalRemainingRegionNum--
+		for _, id := range ri.AllStores {
+			if _, ok := storeCandidateRegionMap[id]; ok {
+				delete(storeCandidateRegionMap[id], key)
+				totalRegionCandidateNum--
+				if len(storeCandidateRegionMap[id]) == 0 {
+					delete(storeCandidateRegionMap, id)
+				}
+			}
+		}
+		if totalRemainingRegionNum > 0 {
+			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+			// This is not optimal because we only check the stores affected by this region; in order
+			// to find the store with the lowest weightedRegionNum, all stores should be checked, but
+			// checking only the affected stores is simpler and gives a good enough result.
+			store = findNextStore(ri.AllStores)
+		}
+	}
+	if totalRemainingRegionNum > 0 {
+		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+		return originalTasks
+	}
+
+	var ret []*batchCopTask
+	for _, task := range storeTaskMap {
+		ret = append(ret, task)
+	}
+	return ret
 }
 
 func buildBatchCopTasks(bo *Backoffer, cache *tikv.RegionCache, ranges *tikv.KeyRanges, storeType kv.StoreType) ([]*batchCopTask, error) {
@@ -138,13 +285,15 @@ func buildBatchCopTasks(bo *Backoffer, cache *tikv.RegionCache, ranges *tikv.Key
 			// Then `splitRegion` will reloads these regions.
 			continue
 		}
+		allStores := cache.GetAllValidTiFlashStores(task.region, rpcCtx.Store)
 		if batchCop, ok := storeTaskMap[rpcCtx.Addr]; ok {
-			batchCop.copTasks = append(batchCop.copTasks, copTaskAndRPCContext{task: task, ctx: rpcCtx})
+			batchCop.regionInfos = append(batchCop.regionInfos, tikv.RegionInfo{Region: task.region, Meta: rpcCtx.Meta, Ranges: task.ranges, AllStores: allStores})
 		} else {
 			batchTask := &batchCopTask{
-				storeAddr: rpcCtx.Addr,
-				cmdType:   cmdType,
-				copTasks:  []copTaskAndRPCContext{{task, rpcCtx}},
+				storeAddr:   rpcCtx.Addr,
+				cmdType:     cmdType,
+				ctx:         rpcCtx,
+				regionInfos: []tikv.RegionInfo{{Region: task.region, Meta: rpcCtx.Meta, Ranges: task.ranges, AllStores: allStores}},
 			}
 			storeTaskMap[rpcCtx.Addr] = batchTask
 		}
@@ -159,9 +308,25 @@ func buildBatchCopTasks(bo *Backoffer, cache *tikv.RegionCache, ranges *tikv.Key
 		}
 		continue
 	}
+
 	for _, task := range storeTaskMap {
 		batchTasks = append(batchTasks, task)
 	}
+	if log.GetLevel() <= zap.DebugLevel {
+		msg := "Before region balance:"
+		for _, task := range batchTasks {
+			msg += " store " + task.storeAddr + ": " + strconv.Itoa(len(task.regionInfos)) + " regions,"
+		}
+		logutil.BgLogger().Debug(msg)
+	}
+	batchTasks = balanceBatchCopTask(batchTasks)
+	if log.GetLevel() <= zap.DebugLevel {
+		msg := "After region balance:"
+		for _, task := range batchTasks {
+			msg += " store " + task.storeAddr + ": " + strconv.Itoa(len(task.regionInfos)) + " regions,"
+		}
+		logutil.BgLogger().Debug(msg)
+	}
 
 	if elapsed := time.Since(start); elapsed > time.Millisecond*500 {
 		logutil.BgLogger().Warn("buildBatchCopTasks takes too much time",
@@ -311,8 +476,8 @@ func (b *batchCopIterator) handleTask(ctx context.Context, bo *Backoffer, task *
 
 // Merge all ranges and request again.
func (b *batchCopIterator) retryBatchCopTask(ctx context.Context, bo *Backoffer, batchTask *batchCopTask) ([]*batchCopTask, error) { var ranges []tikvstore.KeyRange - for _, taskCtx := range batchTask.copTasks { - taskCtx.task.ranges.Do(func(ran *tikvstore.KeyRange) { + for _, ri := range batchTask.regionInfos { + ri.Ranges.Do(func(ran *tikvstore.KeyRange) { ranges = append(ranges, *ran) }) } @@ -320,16 +485,16 @@ func (b *batchCopIterator) retryBatchCopTask(ctx context.Context, bo *Backoffer, } func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, task *batchCopTask) ([]*batchCopTask, error) { - sender := NewRegionBatchRequestSender(b.store.GetRegionCache(), b.store.GetTiKVClient()) - var regionInfos = make([]*coprocessor.RegionInfo, 0, len(task.copTasks)) - for _, task := range task.copTasks { + sender := tikv.NewRegionBatchRequestSender(b.store.GetRegionCache(), b.store.GetTiKVClient()) + var regionInfos = make([]*coprocessor.RegionInfo, 0, len(task.regionInfos)) + for _, ri := range task.regionInfos { regionInfos = append(regionInfos, &coprocessor.RegionInfo{ - RegionId: task.task.region.GetID(), + RegionId: ri.Region.GetID(), RegionEpoch: &metapb.RegionEpoch{ - ConfVer: task.task.region.GetConfVer(), - Version: task.task.region.GetVer(), + ConfVer: ri.Region.GetConfVer(), + Version: ri.Region.GetVer(), }, - Ranges: task.task.ranges.ToPBRanges(), + Ranges: ri.Ranges.ToPBRanges(), }) } @@ -351,13 +516,14 @@ func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, ta }) req.StoreTp = tikvrpc.TiFlash - logutil.BgLogger().Debug("send batch request to ", zap.String("req info", req.String()), zap.Int("cop task len", len(task.copTasks))) - resp, retry, cancel, err := sender.sendStreamReqToAddr(bo, task.copTasks, req, tikv.ReadTimeoutUltraLong) + logutil.BgLogger().Debug("send batch request to ", zap.String("req info", req.String()), zap.Int("cop task len", len(task.regionInfos))) + resp, retry, cancel, err := sender.SendReqToAddr(bo.TiKVBackoffer(), task.ctx, task.regionInfos, req, tikv.ReadTimeoutUltraLong) // If there are store errors, we should retry for all regions. if retry { return b.retryBatchCopTask(ctx, bo, task) } if err != nil { + err = derr.ToTiDBErr(err) return nil, errors.Trace(err) } defer cancel() diff --git a/store/copr/mpp.go b/store/copr/mpp.go index 2aaf4223ed8e5..1941f2b3fbfa4 100644 --- a/store/copr/mpp.go +++ b/store/copr/mpp.go @@ -180,14 +180,14 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req var regionInfos []*coprocessor.RegionInfo originalTask, ok := req.Meta.(*batchCopTask) if ok { - for _, task := range originalTask.copTasks { + for _, ri := range originalTask.regionInfos { regionInfos = append(regionInfos, &coprocessor.RegionInfo{ - RegionId: task.task.region.GetID(), + RegionId: ri.Region.GetID(), RegionEpoch: &metapb.RegionEpoch{ - ConfVer: task.task.region.GetConfVer(), - Version: task.task.region.GetVer(), + ConfVer: ri.Region.GetConfVer(), + Version: ri.Region.GetVer(), }, - Ranges: task.task.ranges.ToPBRanges(), + Ranges: ri.Ranges.ToPBRanges(), }) } } @@ -214,8 +214,8 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req // Or else it's the task without region, which always happens in high layer task without table. 
// In that case if originalTask != nil { - sender := NewRegionBatchRequestSender(m.store.GetRegionCache(), m.store.GetTiKVClient()) - rpcResp, _, _, err = sender.sendStreamReqToAddr(bo, originalTask.copTasks, wrappedReq, tikv.ReadTimeoutMedium) + sender := tikv.NewRegionBatchRequestSender(m.store.GetRegionCache(), m.store.GetTiKVClient()) + rpcResp, _, _, err = sender.SendReqToAddr(bo.TiKVBackoffer(), originalTask.ctx, originalTask.regionInfos, wrappedReq, tikv.ReadTimeoutMedium) // No matter what the rpc error is, we won't retry the mpp dispatch tasks. // TODO: If we want to retry, we must redo the plan fragment cutting and task scheduling. // That's a hard job but we can try it in the future. diff --git a/store/copr/batch_request_sender.go b/store/tikv/batch_request_sender.go similarity index 54% rename from store/copr/batch_request_sender.go rename to store/tikv/batch_request_sender.go index 422306382337d..9aad070b70306 100644 --- a/store/copr/batch_request_sender.go +++ b/store/tikv/batch_request_sender.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package copr +package tikv import ( "context" @@ -19,45 +19,52 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/kvproto/pkg/metapb" tikverr "github.com/pingcap/tidb/store/tikv/error" "github.com/pingcap/tidb/store/tikv/tikvrpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) +// RegionInfo contains region related information for batchCopTask +type RegionInfo struct { + Region RegionVerID + Meta *metapb.Region + Ranges *KeyRanges + AllStores []uint64 +} + // RegionBatchRequestSender sends BatchCop requests to TiFlash server by stream way. type RegionBatchRequestSender struct { - *tikv.RegionRequestSender + *RegionRequestSender } // NewRegionBatchRequestSender creates a RegionBatchRequestSender object. -func NewRegionBatchRequestSender(cache *tikv.RegionCache, client tikv.Client) *RegionBatchRequestSender { +func NewRegionBatchRequestSender(cache *RegionCache, client Client) *RegionBatchRequestSender { return &RegionBatchRequestSender{ - RegionRequestSender: tikv.NewRegionRequestSender(cache, client), + RegionRequestSender: NewRegionRequestSender(cache, client), } } -func (ss *RegionBatchRequestSender) sendStreamReqToAddr(bo *Backoffer, ctxs []copTaskAndRPCContext, req *tikvrpc.Request, timout time.Duration) (resp *tikvrpc.Response, retry bool, cancel func(), err error) { - // use the first ctx to send request, because every ctx has same address. 
+// SendReqToAddr sends a batch cop request
+func (ss *RegionBatchRequestSender) SendReqToAddr(bo *Backoffer, rpcCtx *RPCContext, regionInfos []RegionInfo, req *tikvrpc.Request, timout time.Duration) (resp *tikvrpc.Response, retry bool, cancel func(), err error) {
 	cancel = func() {}
-	rpcCtx := ctxs[0].ctx
 	if e := tikvrpc.SetContext(req, rpcCtx.Meta, rpcCtx.Peer); e != nil {
 		return nil, false, cancel, errors.Trace(e)
 	}
 	ctx := bo.GetCtx()
-	if rawHook := ctx.Value(tikv.RPCCancellerCtxKey{}); rawHook != nil {
-		ctx, cancel = rawHook.(*tikv.RPCCanceller).WithCancel(ctx)
+	if rawHook := ctx.Value(RPCCancellerCtxKey{}); rawHook != nil {
+		ctx, cancel = rawHook.(*RPCCanceller).WithCancel(ctx)
 	}
 	start := time.Now()
 	resp, err = ss.GetClient().SendRequest(ctx, rpcCtx.Addr, req, timout)
 	if ss.Stats != nil {
-		tikv.RecordRegionRequestRuntimeStats(ss.Stats, req.Type, time.Since(start))
+		RecordRegionRequestRuntimeStats(ss.Stats, req.Type, time.Since(start))
 	}
 	if err != nil {
 		cancel()
 		ss.SetRPCError(err)
-		e := ss.onSendFail(bo, ctxs, err)
+		e := ss.onSendFailForBatchRegions(bo, rpcCtx, regionInfos, err)
 		if e != nil {
 			return nil, false, func() {}, errors.Trace(e)
 		}
@@ -67,30 +74,25 @@ func (ss *RegionBatchRequestSender) sendStreamReqToAddr(bo *Backoffer, ctxs []co
 	return
 }
 
-func (ss *RegionBatchRequestSender) onSendFail(bo *Backoffer, ctxs []copTaskAndRPCContext, err error) error {
+func (ss *RegionBatchRequestSender) onSendFailForBatchRegions(bo *Backoffer, ctx *RPCContext, regionInfos []RegionInfo, err error) error {
 	// If it failed because the context is cancelled by ourself, don't retry.
 	if errors.Cause(err) == context.Canceled || status.Code(errors.Cause(err)) == codes.Canceled {
 		return errors.Trace(err)
-	} else if atomic.LoadUint32(&tikv.ShuttingDown) > 0 {
+	} else if atomic.LoadUint32(&ShuttingDown) > 0 {
 		return tikverr.ErrTiDBShuttingDown
 	}
 
-	for _, failedCtx := range ctxs {
-		ctx := failedCtx.ctx
-		if ctx.Meta != nil {
-			// The reload region param is always true. Because that every time we try, we must
-			// re-build the range then re-create the batch sender. As a result, the len of "failStores"
-			// will change. If tiflash's replica is more than two, the "reload region" will always be false.
-			// Now that the batch cop and mpp has a relative low qps, it's reasonable to reload every time
-			// when meeting io error.
-			ss.GetRegionCache().OnSendFail(bo.TiKVBackoffer(), ctx, true, err)
-		}
-	}
+	// The reload region param is always true, because every time we try, we must
+	// re-build the range and re-create the batch sender. As a result, the len of "failStores"
+	// will change. If tiflash has more than two replicas, the "reload region" will always be false.
+	// Since batch cop and mpp have a relatively low qps, it's reasonable to reload every time
+	// we meet an io error.
+	ss.GetRegionCache().OnSendFailForBatchRegions(bo, ctx.Store, regionInfos, true, err)
 
 	// Retry on send request failure when it's not canceled.
 	// When a store is not available, the leader of related region should be elected quickly.
 	// TODO: the number of retry time should be limited:since region may be unavailable
 	// when some unrecoverable disaster happened.
- err = bo.Backoff(tikv.BoTiFlashRPC, errors.Errorf("send tikv request error: %v, ctxs: %v, try next peer later", err, ctxs)) + err = bo.Backoff(BoTiFlashRPC, errors.Errorf("send request error: %v, ctx: %v, regionInfos: %v", err, ctx, regionInfos)) return errors.Trace(err) } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index f6225a2724f8e..0d9423a9f5a7e 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -112,6 +112,15 @@ func (r *RegionStore) accessStore(mode AccessMode, idx AccessIndex) (int, *Store return sidx, r.stores[sidx] } +func (r *RegionStore) getAccessIndex(mode AccessMode, store *Store) AccessIndex { + for index, sidx := range r.accessIndex[mode] { + if r.stores[sidx].storeID == store.storeID { + return AccessIndex(index) + } + } + return -1 +} + func (r *RegionStore) accessStoreNum(mode AccessMode) int { return len(r.accessIndex[mode]) } @@ -526,6 +535,40 @@ func (c *RegionCache) GetTiKVRPCContext(bo *Backoffer, id RegionVerID, replicaRe }, nil } +// GetAllValidTiFlashStores returns the store ids of all valid TiFlash stores, the store id of currentStore is always the first one +func (c *RegionCache) GetAllValidTiFlashStores(id RegionVerID, currentStore *Store) []uint64 { + // set the cap to 2 because usually, TiFlash table will have 2 replicas + allStores := make([]uint64, 0, 2) + // make sure currentStore id is always the first in allStores + allStores = append(allStores, currentStore.storeID) + ts := time.Now().Unix() + cachedRegion := c.getCachedRegionWithRLock(id) + if cachedRegion == nil { + return allStores + } + if !cachedRegion.checkRegionCacheTTL(ts) { + return allStores + } + regionStore := cachedRegion.getStore() + currentIndex := regionStore.getAccessIndex(TiFlashOnly, currentStore) + if currentIndex == -1 { + return allStores + } + for startOffset := 1; startOffset < regionStore.accessStoreNum(TiFlashOnly); startOffset++ { + accessIdx := AccessIndex((int(currentIndex) + startOffset) % regionStore.accessStoreNum(TiFlashOnly)) + storeIdx, store := regionStore.accessStore(TiFlashOnly, accessIdx) + if store.getResolveState() == needCheck { + continue + } + storeFailEpoch := atomic.LoadUint32(&store.epoch) + if storeFailEpoch != regionStore.storeEpochs[storeIdx] { + continue + } + allStores = append(allStores, store.storeID) + } + return allStores +} + // GetTiFlashRPCContext returns RPCContext for a region must access flash store. If it returns nil, the region // must be out of date and already dropped from cache or not flash store found. // `loadBalance` is an option. For MPP and batch cop, it is pointless and might cause try the failed store repeatly. @@ -668,6 +711,64 @@ func (c *RegionCache) findRegionByKey(bo *Backoffer, key []byte, isEndKey bool) return r, nil } +// OnSendFailForBatchRegions handles send request fail logic. 
+func (c *RegionCache) OnSendFailForBatchRegions(bo *Backoffer, store *Store, regionInfos []RegionInfo, scheduleReload bool, err error) {
+	metrics.RegionCacheCounterWithSendFail.Add(float64(len(regionInfos)))
+	if store.storeType != tikvrpc.TiFlash {
+		logutil.Logger(bo.GetCtx()).Info("Should not reach here, OnSendFailForBatchRegions only supports TiFlash")
+		return
+	}
+	for _, ri := range regionInfos {
+		if ri.Meta == nil {
+			continue
+		}
+		r := c.getCachedRegionWithRLock(ri.Region)
+		if r != nil {
+			peersNum := len(r.meta.Peers)
+			if len(ri.Meta.Peers) != peersNum {
+				logutil.Logger(bo.GetCtx()).Info("retry and refresh current region after send request fail and up/down stores length changed",
+					zap.Stringer("region", &ri.Region),
+					zap.Bool("needReload", scheduleReload),
+					zap.Reflect("oldPeers", ri.Meta.Peers),
+					zap.Reflect("newPeers", r.meta.Peers),
+					zap.Error(err))
+				continue
+			}
+
+			rs := r.getStore()
+
+			accessMode := TiFlashOnly
+			accessIdx := rs.getAccessIndex(accessMode, store)
+			if accessIdx == -1 {
+				logutil.Logger(bo.GetCtx()).Warn("can not get access index for region " + ri.Region.String())
+				continue
+			}
+			if err != nil {
+				storeIdx, s := rs.accessStore(accessMode, accessIdx)
+				epoch := rs.storeEpochs[storeIdx]
+				if atomic.CompareAndSwapUint32(&s.epoch, epoch, epoch+1) {
+					logutil.BgLogger().Info("mark store's regions need to be refilled", zap.String("store", s.addr))
+					metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc()
+				}
+				// schedule a store addr resolve.
+				s.markNeedCheck(c.notifyCheckCh)
+			}
+
+			// try next peer
+			rs.switchNextFlashPeer(r, accessIdx)
+			logutil.Logger(bo.GetCtx()).Info("switch region tiflash peer to next due to send request fail",
+				zap.Stringer("region", &ri.Region),
+				zap.Bool("needReload", scheduleReload),
+				zap.Error(err))
+
+			// force reload region when all known peers in the region have been retried.
+			if scheduleReload {
+				r.scheduleReload()
+			}
+		}
+	}
+}
+
 // OnSendFail handles send request fail logic.
 func (c *RegionCache) OnSendFail(bo *Backoffer, ctx *RPCContext, scheduleReload bool, err error) {
 	metrics.RegionCacheCounterWithSendFail.Inc()

From fbbf2b49c56cf42bc8a6fbdb2cc2a1eaa0f65e54 Mon Sep 17 00:00:00 2001
From: Song Gao
Date: Tue, 18 May 2021 19:19:40 +0800
Subject: [PATCH 04/10] store, metrics: Add metrics for safeTS updating (#24687)

---
 store/tikv/kv.go              |  6 +++++-
 store/tikv/metrics/metrics.go | 20 ++++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/store/tikv/kv.go b/store/tikv/kv.go
index 8cec3dfbca964..edaef3b4744d7 100644
--- a/store/tikv/kv.go
+++ b/store/tikv/kv.go
@@ -18,6 +18,7 @@ import (
 	"crypto/tls"
 	"math"
 	"math/rand"
+	"strconv"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -436,17 +437,20 @@ func (s *KVStore) updateSafeTS(ctx context.Context) {
 		storeAddr := store.addr
 		go func(ctx context.Context, wg *sync.WaitGroup, storeID uint64, storeAddr string) {
 			defer wg.Done()
-			// TODO: add metrics for updateSafeTS
 			resp, err := tikvClient.SendRequest(ctx, storeAddr, tikvrpc.NewRequest(tikvrpc.CmdStoreSafeTS, &kvrpcpb.StoreSafeTSRequest{KeyRange: &kvrpcpb.KeyRange{
 				StartKey: []byte(""),
 				EndKey:   []byte(""),
 			}}), ReadTimeoutShort)
+			storeIDStr := strconv.Itoa(int(storeID))
 			if err != nil {
+				metrics.TiKVSafeTSUpdateCounter.WithLabelValues("fail", storeIDStr).Inc()
 				logutil.BgLogger().Debug("update safeTS failed", zap.Error(err), zap.Uint64("store-id", storeID))
 				return
 			}
 			safeTSResp := resp.Resp.(*kvrpcpb.StoreSafeTSResponse)
 			s.setSafeTS(storeID, safeTSResp.GetSafeTs())
+			metrics.TiKVSafeTSUpdateCounter.WithLabelValues("success", storeIDStr).Inc()
+			metrics.TiKVSafeTSUpdateStats.WithLabelValues(storeIDStr).Set(float64(safeTSResp.GetSafeTs()))
 		}(ctx, wg, storeID, storeAddr)
 	}
 	wg.Wait()
diff --git a/store/tikv/metrics/metrics.go b/store/tikv/metrics/metrics.go
index 8d71582fa2522..6b8ea32d456f7 100644
--- a/store/tikv/metrics/metrics.go
+++ b/store/tikv/metrics/metrics.go
@@ -59,6 +59,8 @@ var (
 	TiKVPanicCounter             *prometheus.CounterVec
 	TiKVForwardRequestCounter    *prometheus.CounterVec
 	TiKVTSFutureWaitDuration     prometheus.Histogram
+	TiKVSafeTSUpdateCounter      *prometheus.CounterVec
+	TiKVSafeTSUpdateStats        *prometheus.GaugeVec
 )
 
 // Label constants.
@@ -414,6 +416,22 @@ func initMetrics(namespace, subsystem string) {
 			Buckets:   prometheus.ExponentialBuckets(0.000005, 2, 30), // 5us ~ 2560s
 		})
 
+	TiKVSafeTSUpdateCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "safets_update_counter",
+			Help:      "Counter of tikv safe_ts being updated.",
+		}, []string{LblResult, LblStore})
+
+	TiKVSafeTSUpdateStats = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "safets_update_stats",
+			Help:      "Gauge of the latest safe_ts reported by each tikv store.",
+		}, []string{LblStore})
+
 	initShortcuts()
}
 
@@ -468,6 +486,8 @@ func RegisterMetrics() {
 	prometheus.MustRegister(TiKVPanicCounter)
 	prometheus.MustRegister(TiKVForwardRequestCounter)
 	prometheus.MustRegister(TiKVTSFutureWaitDuration)
+	prometheus.MustRegister(TiKVSafeTSUpdateCounter)
+	prometheus.MustRegister(TiKVSafeTSUpdateStats)
 }
 
 // readCounter reads the value of a prometheus.Counter.
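To make the shape of the two new metrics concrete, here is a minimal, self-contained sketch — not part of the patch; it drops the namespace/subsystem prefixes and hard-codes the label values for illustration — that drives and reads them back the same way updateSafeTS does on a successful report:

package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Mirrors TiKVSafeTSUpdateCounter: a counter labelled by result and store.
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "safets_update_counter",
		Help: "Counter of tikv safe_ts being updated.",
	}, []string{"result", "store"})

	// Mirrors TiKVSafeTSUpdateStats: one gauge per store holding the last safe_ts.
	stats := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "safets_update_stats",
		Help: "Gauge of the latest safe_ts reported by each tikv store.",
	}, []string{"store"})

	// Simulate one successful update for store 1 with safe_ts = 42,
	// the same sequence updateSafeTS performs on success.
	storeIDStr := strconv.Itoa(1)
	counter.WithLabelValues("success", storeIDStr).Inc()
	stats.WithLabelValues(storeIDStr).Set(float64(42))

	// Read the gauge back to verify the recorded value.
	var m dto.Metric
	if err := stats.WithLabelValues(storeIDStr).Write(&m); err == nil {
		fmt.Println("safe_ts gauge:", m.GetGauge().GetValue())
	}
}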
From b515e14d3e62d41a5de87eb176c3cc3ffb146b4c Mon Sep 17 00:00:00 2001 From: Morgan Tocker Date: Tue, 18 May 2021 06:13:40 -0600 Subject: [PATCH 05/10] sem: add tidbredact log to restricted variables (#24701) --- util/sem/sem.go | 6 ++++-- util/sem/sem_test.go | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/util/sem/sem.go b/util/sem/sem.go index d29d29b601559..1aac6d0a9a999 100644 --- a/util/sem/sem.go +++ b/util/sem/sem.go @@ -138,6 +138,7 @@ func IsInvisibleSysVar(varNameInLower string) bool { variable.TiDBCheckMb4ValueInUTF8, variable.TiDBConfig, variable.TiDBEnableSlowLog, + variable.TiDBEnableTelemetry, variable.TiDBExpensiveQueryTimeThreshold, variable.TiDBForcePriority, variable.TiDBGeneralLog, @@ -146,12 +147,13 @@ func IsInvisibleSysVar(varNameInLower string) bool { variable.TiDBOptWriteRowID, variable.TiDBPProfSQLCPU, variable.TiDBRecordPlanInSlowLog, + variable.TiDBRowFormatVersion, variable.TiDBSlowQueryFile, variable.TiDBSlowLogThreshold, variable.TiDBEnableCollectExecutionInfo, variable.TiDBMemoryUsageAlarmRatio, - variable.TiDBEnableTelemetry, - variable.TiDBRowFormatVersion: + variable.TiDBRedactLog, + variable.TiDBSlowLogMasking: return true } return false diff --git a/util/sem/sem_test.go b/util/sem/sem_test.go index 073a195139c37..c2a54170dcf99 100644 --- a/util/sem/sem_test.go +++ b/util/sem/sem_test.go @@ -98,4 +98,6 @@ func (s *testSecurity) TestIsInvisibleSysVar(c *C) { c.Assert(IsInvisibleSysVar(variable.TiDBMemoryUsageAlarmRatio), IsTrue) c.Assert(IsInvisibleSysVar(variable.TiDBEnableTelemetry), IsTrue) c.Assert(IsInvisibleSysVar(variable.TiDBRowFormatVersion), IsTrue) + c.Assert(IsInvisibleSysVar(variable.TiDBRedactLog), IsTrue) + c.Assert(IsInvisibleSysVar(variable.TiDBSlowLogMasking), IsTrue) } From 44830b917465c5d46cd22f617a3e260f09f0b857 Mon Sep 17 00:00:00 2001 From: wjHuang Date: Wed, 19 May 2021 00:25:40 +0800 Subject: [PATCH 06/10] session: fix dml_batch_size doesn't load the global variable (#24710) --- session/session.go | 1 + session/session_test.go | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/session/session.go b/session/session.go index 78a60a6ebaecf..efd6706c4ffb3 100644 --- a/session/session.go +++ b/session/session.go @@ -2652,6 +2652,7 @@ var builtinGlobalVariable = []string{ variable.TiDBAllowFallbackToTiKV, variable.TiDBEnableDynamicPrivileges, variable.CTEMaxRecursionDepth, + variable.TiDBDMLBatchSize, } // loadCommonGlobalVariablesIfNeeded loads and applies commonly used global variables for the session. diff --git a/session/session_test.go b/session/session_test.go index f7267e3a13259..df2a167921e56 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -4476,3 +4476,13 @@ func (s *testTxnStateSuite) TestRollbacking(c *C) { c.Assert(tk.Se.TxnInfo().State, Equals, txninfo.TxnRollingBack) <-ch } + +func (s *testSessionSuite) TestReadDMLBatchSize(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set global tidb_dml_batch_size=1000") + se, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + // `select 1` to load the global variables. 
+ _, _ = se.Execute(context.TODO(), "select 1") + c.Assert(se.GetSessionVars().DMLBatchSize, Equals, 1000) +} From 3ec8c8bae798642baf267601a5864238d3367b9e Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Wed, 19 May 2021 07:39:40 +0800 Subject: [PATCH 07/10] store/tikv: retry TSO RPC (#24682) --- store/tikv/2pc.go | 14 +++++--------- store/tikv/lock_resolver.go | 9 ++------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index 19f3e4faf40e3..14609f5f77400 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -739,15 +739,11 @@ func (tm *ttlManager) keepAlive(c *twoPhaseCommitter) { return } bo := retry.NewBackofferWithVars(context.Background(), pessimisticLockMaxBackoff, c.txn.vars) - now, err := c.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) + now, err := c.store.getTimestampWithRetry(bo, c.txn.GetScope()) if err != nil { - err1 := bo.Backoff(retry.BoPDRPC, err) - if err1 != nil { - logutil.Logger(bo.GetCtx()).Warn("keepAlive get tso fail", - zap.Error(err)) - return - } - continue + logutil.Logger(bo.GetCtx()).Warn("keepAlive get tso fail", + zap.Error(err)) + return } uptime := uint64(oracle.ExtractPhysical(now) - oracle.ExtractPhysical(c.startTS)) @@ -999,7 +995,7 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { // from PD and plus one as our MinCommitTS. if commitTSMayBeCalculated && c.needLinearizability() { failpoint.Inject("getMinCommitTSFromTSO", nil) - latestTS, err := c.store.oracle.GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) + latestTS, err := c.store.getTimestampWithRetry(retry.NewBackofferWithVars(ctx, tsoMaxBackoff, c.txn.vars), c.txn.GetScope()) // If we fail to get a timestamp from PD, we just propagate the failure // instead of falling back to the normal 2PC because a normal 2PC will // also be likely to fail due to the same timestamp issue. diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go index fe50910a896e6..0ed9ecb3fa471 100644 --- a/store/tikv/lock_resolver.go +++ b/store/tikv/lock_resolver.go @@ -229,11 +229,6 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi // locks have been cleaned before GC. expiredLocks := locks - callerStartTS, err := lr.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - if err != nil { - return false, errors.Trace(err) - } - txnInfos := make(map[uint64]uint64) startTime := time.Now() for _, l := range expiredLocks { @@ -243,7 +238,7 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi metrics.LockResolverCountWithExpired.Inc() // Use currentTS = math.MaxUint64 means rollback the txn, no matter the lock is expired or not! 
-		status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, math.MaxUint64, true, false, l)
+		status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, 0, math.MaxUint64, true, false, l)
 		if err != nil {
 			return false, err
 		}
@@ -257,7 +252,7 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi
 			continue
 		}
 		if _, ok := errors.Cause(err).(*nonAsyncCommitLock); ok {
-			status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, math.MaxUint64, true, true, l)
+			status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary, 0, math.MaxUint64, true, true, l)
 			if err != nil {
 				return false, err
 			}

From ac7e6a42c50367b55865a587fc4af08f2758f6ae Mon Sep 17 00:00:00 2001
From: Shenghui Wu <793703860@qq.com>
Date: Wed, 19 May 2021 07:51:40 +0800
Subject: [PATCH 08/10] expression, planner: push cast down to control function with enum type. (#24542)

---
 expression/builtin_cast.go           | 90 ++++++++++++++++++++++++++++
 expression/integration_test.go       | 73 ++++++++++++++++++++++
 planner/core/logical_plan_builder.go | 13 ++++
 3 files changed, 176 insertions(+)

diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go
index 0f2d5827c91d9..9d6becab44cbe 100644
--- a/expression/builtin_cast.go
+++ b/expression/builtin_cast.go
@@ -1804,6 +1804,7 @@ func BuildCastFunction4Union(ctx sessionctx.Context, expr Expression, tp *types.
 
 // BuildCastFunction builds a CAST ScalarFunction from the Expression.
 func BuildCastFunction(ctx sessionctx.Context, expr Expression, tp *types.FieldType) (res Expression) {
+	expr = TryPushCastIntoControlFunctionForHybridType(ctx, expr, tp)
 	var fc functionClass
 	switch tp.EvalType() {
 	case types.ETInt:
@@ -1983,3 +1984,92 @@ func WrapWithCastAsJSON(ctx sessionctx.Context, expr Expression) Expression {
 	}
 	return BuildCastFunction(ctx, expr, tp)
 }
+
+// TryPushCastIntoControlFunctionForHybridType tries to push a cast into a control function for hybrid types.
+// If necessary, it will rebuild the control function using the changed args.
+// When a hybrid type is the output of a control function, the result may be used as a numeric type in subsequent
+// calculations. We should perform the `Cast` operation early to avoid using the wrong type for the calculation.
+// For example, for the condition `if(1, e, 'a') = 1`, the `if` function will output `e` and compare it with `1`.
+// If the eval type is ETString, it will get a wrong result. So we can rewrite the condition to
+// `IfInt(1, cast(e as int), cast('a' as int)) = 1` to get the correct result.
+func TryPushCastIntoControlFunctionForHybridType(ctx sessionctx.Context, expr Expression, tp *types.FieldType) (res Expression) {
+	sf, ok := expr.(*ScalarFunction)
+	if !ok {
+		return expr
+	}
+
+	var wrapCastFunc func(ctx sessionctx.Context, expr Expression) Expression
+	switch tp.EvalType() {
+	case types.ETInt:
+		wrapCastFunc = WrapWithCastAsInt
+	case types.ETReal:
+		wrapCastFunc = WrapWithCastAsReal
+	default:
+		return expr
+	}
+
+	isHybrid := func(ft *types.FieldType) bool {
+		// todo: be compatible with mysql control functions that use the bit type, see
issue 24725 + return ft.Hybrid() && ft.Tp != mysql.TypeBit + } + + args := sf.GetArgs() + switch sf.FuncName.L { + case ast.If: + if isHybrid(args[1].GetType()) || isHybrid(args[2].GetType()) { + args[1] = wrapCastFunc(ctx, args[1]) + args[2] = wrapCastFunc(ctx, args[2]) + f, err := funcs[ast.If].getFunction(ctx, args) + if err != nil { + return expr + } + sf.RetType, sf.Function = f.getRetTp(), f + return sf + } + case ast.Case: + hasHybrid := false + for i := 0; i < len(args)-1; i += 2 { + hasHybrid = hasHybrid || isHybrid(args[i+1].GetType()) + } + if len(args)%2 == 1 { + hasHybrid = hasHybrid || isHybrid(args[len(args)-1].GetType()) + } + if !hasHybrid { + return expr + } + + for i := 0; i < len(args)-1; i += 2 { + args[i+1] = wrapCastFunc(ctx, args[i+1]) + } + if len(args)%2 == 1 { + args[len(args)-1] = wrapCastFunc(ctx, args[len(args)-1]) + } + f, err := funcs[ast.Case].getFunction(ctx, args) + if err != nil { + return expr + } + sf.RetType, sf.Function = f.getRetTp(), f + return sf + case ast.Elt: + hasHybrid := false + for i := 1; i < len(args); i++ { + hasHybrid = hasHybrid || isHybrid(args[i].GetType()) + } + if !hasHybrid { + return expr + } + + for i := 1; i < len(args); i++ { + args[i] = wrapCastFunc(ctx, args[i]) + } + f, err := funcs[ast.Elt].getFunction(ctx, args) + if err != nil { + return expr + } + sf.RetType, sf.Function = f.getRetTp(), f + return sf + default: + return expr + } + return expr +} diff --git a/expression/integration_test.go b/expression/integration_test.go index 69142c01c3f35..a0fb6fd8b5499 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -9435,3 +9435,76 @@ func (s *testIntegrationSuite) TestEnumIndex(c *C) { tk.MustQuery("select /*+ use_index(t,idx) */ col3 from t where col2 = 'b' and col1 is not null;").Check( testkit.Rows("2")) } + +func (s *testIntegrationSuite) TestControlFunctionWithEnumOrSet(c *C) { + defer s.cleanEnv(c) + + // issue 23114 + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists e;") + tk.MustExec("create table e(e enum('c', 'b', 'a'));") + tk.MustExec("insert into e values ('a'),('b'),('a'),('b');") + tk.MustQuery("select e from e where if(e>1, e, e);").Sort().Check( + testkit.Rows("a", "a", "b", "b")) + tk.MustQuery("select e from e where case e when 1 then e else e end;").Sort().Check( + testkit.Rows("a", "a", "b", "b")) + tk.MustQuery("select e from e where case 1 when e then e end;").Check(testkit.Rows()) + + tk.MustQuery("select if(e>1,e,e)='a' from e").Sort().Check( + testkit.Rows("0", "0", "1", "1")) + tk.MustQuery("select if(e>1,e,e)=1 from e").Sort().Check( + testkit.Rows("0", "0", "0", "0")) + // if and if + tk.MustQuery("select if(e>2,e,e) and if(e<=2,e,e) from e;").Sort().Check( + testkit.Rows("1", "1", "1", "1")) + tk.MustQuery("select if(e>2,e,e) and (if(e<3,0,e) or if(e>=2,0,e)) from e;").Sort().Check( + testkit.Rows("0", "0", "1", "1")) + tk.MustQuery("select * from e where if(e>2,e,e) and if(e<=2,e,e);").Sort().Check( + testkit.Rows("a", "a", "b", "b")) + tk.MustQuery("select * from e where if(e>2,e,e) and (if(e<3,0,e) or if(e>=2,0,e));").Sort().Check( + testkit.Rows("a", "a")) + + // issue 24494 + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int,b enum(\"b\",\"y\",\"1\"));") + tk.MustExec("insert into t values(0,\"y\"),(1,\"b\"),(null,null),(2,\"1\");") + tk.MustQuery("SELECT count(*) FROM t where if(a,b ,null);").Check(testkit.Rows("2")) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table 
t(a int,b enum(\"b\"),c enum(\"c\"));") + tk.MustExec("insert into t values(1,1,1),(2,1,1),(1,1,1),(2,1,1);") + tk.MustQuery("select a from t where if(a=1,b,c)=\"b\";").Check(testkit.Rows("1", "1")) + tk.MustQuery("select a from t where if(a=1,b,c)=\"c\";").Check(testkit.Rows("2", "2")) + tk.MustQuery("select a from t where if(a=1,b,c)=1;").Sort().Check(testkit.Rows("1", "1", "2", "2")) + tk.MustQuery("select a from t where if(a=1,b,c);").Sort().Check(testkit.Rows("1", "1", "2", "2")) + + tk.MustExec("drop table if exists e;") + tk.MustExec("create table e(e enum('c', 'b', 'a'));") + tk.MustExec("insert into e values(3)") + tk.MustQuery("select elt(1,e) = 'a' from e").Check(testkit.Rows("1")) + tk.MustQuery("select elt(1,e) = 3 from e").Check(testkit.Rows("1")) + tk.MustQuery("select e from e where elt(1,e)").Check(testkit.Rows("a")) + + // test set type + tk.MustExec("drop table if exists s;") + tk.MustExec("create table s(s set('c', 'b', 'a'));") + tk.MustExec("insert into s values ('a'),('b'),('a'),('b');") + tk.MustQuery("select s from s where if(s>1, s, s);").Sort().Check( + testkit.Rows("a", "a", "b", "b")) + tk.MustQuery("select s from s where case s when 1 then s else s end;").Sort().Check( + testkit.Rows("a", "a", "b", "b")) + tk.MustQuery("select s from s where case 1 when s then s end;").Check(testkit.Rows()) + + tk.MustQuery("select if(s>1,s,s)='a' from s").Sort().Check( + testkit.Rows("0", "0", "1", "1")) + tk.MustQuery("select if(s>1,s,s)=4 from s").Sort().Check( + testkit.Rows("0", "0", "1", "1")) + + tk.MustExec("drop table if exists s;") + tk.MustExec("create table s(s set('c', 'b', 'a'));") + tk.MustExec("insert into s values('a')") + tk.MustQuery("select elt(1,s) = 'a' from s").Check(testkit.Rows("1")) + tk.MustQuery("select elt(1,s) = 4 from s").Check(testkit.Rows("1")) + tk.MustQuery("select s from s where elt(1,s)").Check(testkit.Rows("a")) +} diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 64bc0c41407e1..fc6fb53dcff44 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -975,6 +975,19 @@ func (b *PlanBuilder) buildSelection(ctx context.Context, p LogicalPlan, where a if len(cnfExpres) == 0 { return p, nil } + // check expr field types. 
+	for i, expr := range cnfExpres {
+		if expr.GetType().EvalType() == types.ETString {
+			tp := &types.FieldType{
+				Tp:      mysql.TypeDouble,
+				Flag:    expr.GetType().Flag,
+				Flen:    mysql.MaxRealWidth,
+				Decimal: types.UnspecifiedLength,
+			}
+			types.SetBinChsClnFlag(tp)
+			cnfExpres[i] = expression.TryPushCastIntoControlFunctionForHybridType(b.ctx, expr, tp)
+		}
+	}
 	selection.Conditions = cnfExpres
 	selection.SetChildren(p)
 	return selection, nil

From f5a362d229fd1032231f05432fd93dd0a2b34d36 Mon Sep 17 00:00:00 2001
From: ZhuoZhi <517770911@qq.com>
Date: Wed, 19 May 2021 08:03:40 +0800
Subject: [PATCH 09/10] executor: add correctness tests about IndexMerge (#24674)

---
 executor/partition_table_test.go | 95 ++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)

diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go
index 8337be660ec0c..529a1da343387 100644
--- a/executor/partition_table_test.go
+++ b/executor/partition_table_test.go
@@ -1328,6 +1328,101 @@ func (s *partitionTableSuite) TestDirectReadingWithAgg(c *C) {
 	}
 }
 
+func (s *partitionTableSuite) TestIdexMerge(c *C) {
+	if israce.RaceEnabled {
+		c.Skip("exhaustive types test, skip race test")
+	}
+
+	tk := testkit.NewTestKitWithInit(c, s.store)
+	tk.MustExec("create database test_idx_merge")
+	tk.MustExec("use test_idx_merge")
+	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
+
+	// list partition table
+	tk.MustExec(`create table tlist(a int, b int, primary key(a) clustered, index idx_b(b)) partition by list(a)(
+		partition p0 values in (1, 2, 3, 4),
+		partition p1 values in (5, 6, 7, 8),
+		partition p2 values in (9, 10, 11, 12));`)
+
+	// range partition table
+	tk.MustExec(`create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) (
+		partition p0 values less than(300),
+		partition p1 values less than (500),
+		partition p2 values less than(1100));`)
+
+	// hash partition table
+	tk.MustExec(`create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4;`)
+
+	// regular table
+	tk.MustExec("create table tregular1(a int, b int, primary key(a) clustered)")
+	tk.MustExec("create table tregular2(a int, b int, primary key(a) clustered)")
+
+	// generate some random data to be inserted
+	vals := make([]string, 0, 2000)
+	for i := 0; i < 2000; i++ {
+		vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(1100), rand.Intn(2000)))
+	}
+
+	tk.MustExec("insert ignore into trange values " + strings.Join(vals, ","))
+	tk.MustExec("insert ignore into thash values " + strings.Join(vals, ","))
+	tk.MustExec("insert ignore into tregular1 values " + strings.Join(vals, ","))
+
+	vals = make([]string, 0, 2000)
+	for i := 0; i < 2000; i++ {
+		vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(12)+1, rand.Intn(20)))
+	}
+
+	tk.MustExec("insert ignore into tlist values " + strings.Join(vals, ","))
+	tk.MustExec("insert ignore into tregular2 values " + strings.Join(vals, ","))
+
+	// test range partition
+	for i := 0; i < 100; i++ {
+		x1 := rand.Intn(1099)
+		x2 := rand.Intn(1099)
+
+		queryPartition1 := fmt.Sprintf("select /*+ use_index_merge(trange) */ * from trange where a > %v or b < %v;", x1, x2)
+		queryRegular1 := fmt.Sprintf("select /*+ use_index_merge(tregular1) */ * from tregular1 where a > %v or b < %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition1, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition1).Sort().Check(tk.MustQuery(queryRegular1).Sort().Rows())
+
+		queryPartition2 := fmt.Sprintf("select /*+ use_index_merge(trange) */ * from trange where a > %v or b > %v;", x1, x2)
+		queryRegular2 := fmt.Sprintf("select /*+ use_index_merge(tregular1) */ * from tregular1 where a > %v or b > %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition2, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition2).Sort().Check(tk.MustQuery(queryRegular2).Sort().Rows())
+	}
+
+	// test hash partition
+	for i := 0; i < 100; i++ {
+		x1 := rand.Intn(1099)
+		x2 := rand.Intn(1099)
+
+		queryPartition1 := fmt.Sprintf("select /*+ use_index_merge(thash) */ * from thash where a > %v or b < %v;", x1, x2)
+		queryRegular1 := fmt.Sprintf("select /*+ use_index_merge(tregular1) */ * from tregular1 where a > %v or b < %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition1, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition1).Sort().Check(tk.MustQuery(queryRegular1).Sort().Rows())
+
+		queryPartition2 := fmt.Sprintf("select /*+ use_index_merge(thash) */ * from thash where a > %v or b > %v;", x1, x2)
+		queryRegular2 := fmt.Sprintf("select /*+ use_index_merge(tregular1) */ * from tregular1 where a > %v or b > %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition2, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition2).Sort().Check(tk.MustQuery(queryRegular2).Sort().Rows())
+	}
+
+	// test list partition
+	for i := 0; i < 100; i++ {
+		x1 := rand.Intn(12) + 1
+		x2 := rand.Intn(12) + 1
+		queryPartition1 := fmt.Sprintf("select /*+ use_index_merge(tlist) */ * from tlist where a > %v or b < %v;", x1, x2)
+		queryRegular1 := fmt.Sprintf("select /*+ use_index_merge(tregular2) */ * from tregular2 where a > %v or b < %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition1, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition1).Sort().Check(tk.MustQuery(queryRegular1).Sort().Rows())
+
+		queryPartition2 := fmt.Sprintf("select /*+ use_index_merge(tlist) */ * from tlist where a > %v or b > %v;", x1, x2)
+		queryRegular2 := fmt.Sprintf("select /*+ use_index_merge(tregular2) */ * from tregular2 where a > %v or b > %v;", x1, x2)
+		c.Assert(tk.HasPlan(queryPartition2, "IndexMerge"), IsTrue) // check if IndexMerge is used
+		tk.MustQuery(queryPartition2).Sort().Check(tk.MustQuery(queryRegular2).Sort().Rows())
+	}
+}
+
 func (s *globalIndexSuite) TestGlobalIndexScan(c *C) {
 	tk := testkit.NewTestKitWithInit(c, s.store)
 	tk.MustExec("drop table if exists p")

From 0a1c3c0f02ebead5d414c76c71e93b3b64356af7 Mon Sep 17 00:00:00 2001
From: Morgan Tocker
Date: Tue, 18 May 2021 18:41:40 -0600
Subject: [PATCH 10/10] variable: change default for DefDMLBatchSize
 tidbOptInt64 call (#24697)

---
 sessionctx/variable/sysvar.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 99c3da8233d68..e6f632db6b02d 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -904,7 +904,7 @@ var defaultSysVars = []*SysVar{
 		return nil
 	}},
 	{Scope: ScopeGlobal | ScopeSession, Name: TiDBDMLBatchSize, Value: strconv.Itoa(DefDMLBatchSize), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
-		s.DMLBatchSize = int(tidbOptInt64(val, DefOptCorrelationExpFactor))
+		s.DMLBatchSize = int(tidbOptInt64(val, DefDMLBatchSize))
		return nil
	}},
	{Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},