diff --git a/cmd/ddltest/index_test.go b/cmd/ddltest/index_test.go
index 73061acea4d0e..112527242c2f2 100644
--- a/cmd/ddltest/index_test.go
+++ b/cmd/ddltest/index_test.go
@@ -86,9 +86,9 @@ func (s *TestDDLSuite) checkAddIndex(c *C, indexInfo *model.IndexInfo) {
 }
 
 func (s *TestDDLSuite) checkDropIndex(c *C, indexInfo *model.IndexInfo) {
-	gc_worker, err := gcworker.NewMockGCWorker(s.store.(tikv.Storage))
+	gcWorker, err := gcworker.NewMockGCWorker(s.store.(tikv.Storage))
 	c.Assert(err, IsNil)
-	err = gc_worker.DeleteRanges(goctx.Background(), uint64(math.MaxInt32))
+	err = gcWorker.DeleteRanges(goctx.Background(), uint64(math.MaxInt32))
 	c.Assert(err, IsNil)
 
 	ctx := s.ctx
diff --git a/cmd/explaintest/r/explain-non-select-stmt.result b/cmd/explaintest/r/explain-non-select-stmt.result
new file mode 100644
index 0000000000000..746250bf2d319
--- /dev/null
+++ b/cmd/explaintest/r/explain-non-select-stmt.result
@@ -0,0 +1,29 @@
+use test;
+drop table if exists t;
+create table t(a bigint, b bigint);
+explain insert into t values(1, 1);
+id count task operator info
+Insert_1 N/A root N/A
+explain insert into t select * from t;
+id count task operator info
+Insert_1 N/A root N/A
+└─TableReader_7 10000.00 root data:TableScan_6
+  └─TableScan_6 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
+explain delete from t where a > 100;
+id count task operator info
+Delete_3 N/A root N/A
+└─TableReader_6 3333.33 root data:Selection_5
+  └─Selection_5 3333.33 cop gt(Column#1, 100)
+    └─TableScan_4 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
+explain update t set b = 100 where a = 200;
+id count task operator info
+Update_3 N/A root N/A
+└─TableReader_6 10.00 root data:Selection_5
+  └─Selection_5 10.00 cop eq(Column#1, 200)
+    └─TableScan_4 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
+explain replace into t select a, 100 from t;
+id count task operator info
+Insert_1 N/A root N/A
+└─Projection_5 10000.00 root Column#3, 100
+  └─TableReader_7 10000.00 root data:TableScan_6
+    └─TableScan_6 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
diff --git a/cmd/explaintest/r/explain_complex.result b/cmd/explaintest/r/explain_complex.result
index 10cbcf5ed015a..1e3f938afd012 100644
--- a/cmd/explaintest/r/explain_complex.result
+++ b/cmd/explaintest/r/explain_complex.result
@@ -153,9 +153,9 @@ id count task operator info
 Projection_10 0.00 root Column#1, Column#2, Column#4, Column#5, Column#3, Column#24, Column#25, Column#26, Column#11, Column#12, Column#13, Column#14, Column#15, Column#16, Column#17
 └─Limit_13 0.00 root offset:0, count:2000
   └─IndexMergeJoin_25 0.00 root inner join, inner:IndexLookUp_23, outer key:Column#2, Column#5, inner key:Column#21, Column#23
-    ├─TableReader_53 0.00 root data:Selection_52
-    │ └─Selection_52 0.00 cop eq(Column#18, 0), eq(Column#4, "ios"), gt(Column#9, 1478185592), not(isnull(Column#5))
-    │   └─TableScan_51 10000.00 cop table:dt, range:[0,+inf], keep order:false, stats:pseudo
+    ├─TableReader_40 0.00 root data:Selection_39
+    │ └─Selection_39 0.00 cop eq(Column#18, 0), eq(Column#4, "ios"), gt(Column#9, 1478185592), not(isnull(Column#5))
+    │   └─TableScan_38 10000.00 cop table:dt, range:[0,+inf], keep order:false, stats:pseudo
     └─IndexLookUp_23 0.00 root
       ├─IndexScan_20 1.25 cop table:rr, index:aid, dic, range: decided by [eq(Column#21, Column#2) eq(Column#23, Column#5)], keep order:true, stats:pseudo
       └─Selection_22 0.00 cop eq(Column#22, "ios"), gt(Column#26, 1478185592)
diff --git a/cmd/explaintest/r/explain_complex_stats.result b/cmd/explaintest/r/explain_complex_stats.result
index f1f6ae1b2e71d..a2fc0af9e74d9 100644
--- a/cmd/explaintest/r/explain_complex_stats.result
+++ b/cmd/explaintest/r/explain_complex_stats.result
@@ -161,9 +161,9 @@ id count task operator info
 Projection_10 428.32 root Column#1, Column#2, Column#4, Column#5, Column#3, Column#24, Column#25, Column#26, Column#11, Column#12, Column#13, Column#14, Column#15, Column#16, Column#17
 └─Limit_13 428.32 root offset:0, count:2000
   └─IndexMergeJoin_25 428.32 root inner join, inner:IndexLookUp_23, outer key:Column#2, Column#5, inner key:Column#21, Column#23
-    ├─TableReader_53 428.32 root data:Selection_52
-    │ └─Selection_52 428.32 cop eq(Column#18, 0), eq(Column#4, "ios"), gt(Column#9, 1478185592), not(isnull(Column#5))
-    │   └─TableScan_51 2000.00 cop table:dt, range:[0,+inf], keep order:false
+    ├─TableReader_40 428.32 root data:Selection_39
+    │ └─Selection_39 428.32 cop eq(Column#18, 0), eq(Column#4, "ios"), gt(Column#9, 1478185592), not(isnull(Column#5))
+    │   └─TableScan_38 2000.00 cop table:dt, range:[0,+inf], keep order:false
     └─IndexLookUp_23 0.48 root
       ├─IndexScan_20 1.00 cop table:rr, index:aid, dic, range: decided by [eq(Column#21, Column#2) eq(Column#23, Column#5)], keep order:true
       └─Selection_22 0.48 cop eq(Column#22, "ios"), gt(Column#26, 1478185592)
diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result
index 19d177dda98e0..120becef70729 100644
--- a/cmd/explaintest/r/explain_easy.result
+++ b/cmd/explaintest/r/explain_easy.result
@@ -52,12 +52,14 @@ HashLeftJoin_19 4166.67 root left outer join, inner:TableReader_32, equal:[eq(Co
   └─TableScan_30 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
 explain update t1 set t1.c2 = 2 where t1.c1 = 1;
 id count task operator info
-Point_Get_1 1.00 root table:t1, handle:1
+Update_2 N/A root N/A
+└─Point_Get_1 1.00 root table:t1, handle:1
 explain delete from t1 where t1.c2 = 1;
 id count task operator info
-IndexLookUp_9 10.00 root
-├─IndexScan_7 10.00 cop table:t1, index:c2, range:[1,1], keep order:false, stats:pseudo
-└─TableScan_8 10.00 cop table:t1, keep order:false, stats:pseudo
+Delete_3 N/A root N/A
+└─IndexLookUp_9 10.00 root
+  ├─IndexScan_7 10.00 cop table:t1, index:c2, range:[1,1], keep order:false, stats:pseudo
+  └─TableScan_8 10.00 cop table:t1, keep order:false, stats:pseudo
 explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
 id count task operator info
 Projection_11 9990.00 root cast(Column#9)
@@ -120,15 +122,14 @@ MemTableScan_4 10000.00 root
 explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1;
 id count task operator info
 Projection_12 10000.00 root eq(Column#2, Column#5)
-└─Apply_14 10000.00 root CARTESIAN left outer join, inner:Limit_23
+└─Apply_14 10000.00 root CARTESIAN left outer join, inner:Projection_43
   ├─IndexReader_18 10000.00 root index:IndexScan_17
   │ └─IndexScan_17 10000.00 cop table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
-  └─Limit_23 1.00 root offset:0, count:1
-    └─Projection_43 1.00 root Column#4, Column#5
-      └─IndexLookUp_42 1.00 root
-        ├─Limit_41 1.00 cop offset:0, count:1
-        │ └─IndexScan_39 1.00 cop table:t2, index:c1, range: decided by [eq(Column#1, Column#4)], keep order:true, stats:pseudo
-        └─TableScan_40 1.00 cop table:t2, keep order:false, stats:pseudo
+  └─Projection_43 1.00 root Column#4, Column#5
+    └─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
+      ├─Limit_41 1.00 cop offset:0, count:1
+      │ └─IndexScan_39 1.00 cop table:t2, index:c1, range: decided by [eq(Column#1, Column#4)], keep order:true, stats:pseudo
+      └─TableScan_40 1.00 cop table:t2, keep order:false, stats:pseudo
 explain select * from t1 order by c1 desc limit 1;
 id count task operator info
 Limit_10 1.00 root offset:0, count:1
@@ -349,13 +350,13 @@ Projection_11 5.00 root Column#12
 ├─TableReader_15 5.00 root data:TableScan_14
 │ └─TableScan_14 5.00 cop table:t, range:[-inf,+inf], keep order:false
 └─StreamAgg_20 1.00 root funcs:count(1)
-  └─MergeJoin_58 2.40 root inner join, left key:Column#4, right key:Column#7
-    ├─IndexReader_45 2.40 root index:Selection_44
-    │ └─Selection_44 2.40 cop eq(3, Column#1)
-    │   └─IndexScan_43 3.00 cop table:s, index:b, range:[3,3], keep order:true
-    └─TableReader_48 4.00 root data:Selection_47
-      └─Selection_47 4.00 cop eq(3, Column#1)
-        └─TableScan_46 5.00 cop table:t1, range:[-inf,+inf], keep order:true
+  └─MergeJoin_49 2.40 root inner join, left key:Column#4, right key:Column#7
+    ├─IndexReader_36 2.40 root index:Selection_35
+    │ └─Selection_35 2.40 cop eq(3, Column#1)
+    │   └─IndexScan_34 3.00 cop table:s, index:b, range:[3,3], keep order:true
+    └─TableReader_39 4.00 root data:Selection_38
+      └─Selection_38 4.00 cop eq(3, Column#1)
+        └─TableScan_37 5.00 cop table:t1, range:[-inf,+inf], keep order:true
 explain select t.c in (select count(*) from t s left join t t1 on s.a = t1.a where 3 = t.a and s.b = 3) from t;
 id count task operator info
 Projection_10 5.00 root Column#12
@@ -658,7 +659,8 @@ begin;
 insert into t values (1, 1);
 explain update t set j = -j where i = 1 and j = 1;
 id count task operator info
-Point_Get_1 1.00 root table:t, index:i j
+Update_2 N/A root N/A
+└─Point_Get_1 1.00 root table:t, index:i j
 rollback;
 drop table if exists t;
 create table t(a int);
diff --git a/cmd/explaintest/r/explain_easy_stats.result b/cmd/explaintest/r/explain_easy_stats.result
index 2f34083582e4b..4aca3286610b0 100644
--- a/cmd/explaintest/r/explain_easy_stats.result
+++ b/cmd/explaintest/r/explain_easy_stats.result
@@ -55,12 +55,14 @@ HashLeftJoin_19 2481.25 root left outer join, inner:TableReader_32, equal:[eq(Co
   └─TableScan_30 1985.00 cop table:t2, range:[-inf,+inf], keep order:false
 explain update t1 set t1.c2 = 2 where t1.c1 = 1;
 id count task operator info
-Point_Get_1 1.00 root table:t1, handle:1
+Update_2 N/A root N/A
+└─Point_Get_1 1.00 root table:t1, handle:1
 explain delete from t1 where t1.c2 = 1;
 id count task operator info
-IndexLookUp_9 0.00 root
-├─IndexScan_7 0.00 cop table:t1, index:c2, range:[1,1], keep order:false
-└─TableScan_8 0.00 cop table:t1, keep order:false
+Delete_3 N/A root N/A
+└─IndexLookUp_9 0.00 root
+  ├─IndexScan_7 0.00 cop table:t1, index:c2, range:[1,1], keep order:false
+  └─TableScan_8 0.00 cop table:t1, keep order:false
 explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
 id count task operator info
 Projection_11 1985.00 root cast(Column#9)
@@ -104,15 +106,14 @@ MemTableScan_4 10000.00 root
 explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1;
 id count task operator info
 Projection_12 1999.00 root eq(Column#2, Column#5)
-└─Apply_14 1999.00 root CARTESIAN left outer join, inner:Limit_23
+└─Apply_14 1999.00 root CARTESIAN left outer join, inner:Projection_43
   ├─IndexReader_18 1999.00 root index:IndexScan_17
   │ └─IndexScan_17 1999.00 cop table:t1, index:c2, range:[NULL,+inf], keep order:false
-  └─Limit_23 1.00 root offset:0, count:1
-    └─Projection_43 1.00 root Column#4, Column#5
-      └─IndexLookUp_42 1.00 root
-        ├─Limit_41 1.00 cop offset:0, count:1
-        │ └─IndexScan_39 1.25 cop table:t2, index:c1, range: decided by [eq(Column#1, Column#4)], keep order:true
-        └─TableScan_40 1.00 cop table:t2, keep order:false, stats:pseudo
+  └─Projection_43 1.00 root Column#4, Column#5
+    └─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
+      ├─Limit_41 1.00 cop offset:0, count:1
+      │ └─IndexScan_39 1.25 cop table:t2, index:c1, range: decided by [eq(Column#1, Column#4)], keep order:true
+      └─TableScan_40 1.00 cop table:t2, keep order:false, stats:pseudo
 explain select * from t1 order by c1 desc limit 1;
 id count task operator info
 Limit_10 1.00 root offset:0, count:1
@@ -165,18 +166,16 @@ id count task operator info
 TableDual_5 0.00 root rows:0
 explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 1;
 id count task operator info
-Limit_9 1.00 root offset:1, count:1
-└─IndexLookUp_14 1.00 root
-  ├─Limit_13 1.00 cop offset:0, count:2
-  │ └─IndexScan_11 1.00 cop table:index_prune, index:a, b, range:[1010010404050976781 26467085526790,1010010404050976781 26467085526790], keep order:false
-  └─TableScan_12 1.00 cop table:index_prune, keep order:false, stats:pseudo
+IndexLookUp_14 1.00 root limit embedded(offset:1, count:1)
+├─Limit_13 1.00 cop offset:0, count:2
+│ └─IndexScan_11 1.00 cop table:index_prune, index:a, b, range:[1010010404050976781 26467085526790,1010010404050976781 26467085526790], keep order:false
+└─TableScan_12 1.00 cop table:index_prune, keep order:false, stats:pseudo
 explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 0;
 id count task operator info
-Limit_9 0.00 root offset:1, count:0
-└─IndexLookUp_14 1.00 root
-  ├─Limit_13 1.00 cop offset:0, count:1
-  │ └─IndexScan_11 1.00 cop table:index_prune, index:a, b, range:[1010010404050976781 26467085526790,1010010404050976781 26467085526790], keep order:false
-  └─TableScan_12 1.00 cop table:index_prune, keep order:false, stats:pseudo
+IndexLookUp_14 0.00 root limit embedded(offset:1, count:0)
+├─Limit_13 1.00 cop offset:0, count:1
+│ └─IndexScan_11 1.00 cop table:index_prune, index:a, b, range:[1010010404050976781 26467085526790,1010010404050976781 26467085526790], keep order:false
+└─TableScan_12 0.00 cop table:index_prune, keep order:false, stats:pseudo
 explain select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 0, 1;
 id count task operator info
 Point_Get_1 1.00 root table:index_prune, index:a b
+"Selection_11" -> "TableScan_10" +} +"IndexMerge_12" -> "TableScan_8" +"IndexMerge_12" -> "IndexScan_9" +"IndexMerge_12" -> "Selection_11" +} + diff --git a/cmd/explaintest/r/generated_columns.result b/cmd/explaintest/r/generated_columns.result index b3c4fbf9ebf51..4ebf4d47fe51a 100644 --- a/cmd/explaintest/r/generated_columns.result +++ b/cmd/explaintest/r/generated_columns.result @@ -82,13 +82,13 @@ IndexJoin_23 5.00 root inner join, inner:IndexLookUp_22, outer key:Column#8, inn EXPLAIN SELECT * from sgc1 join sgc2 on sgc1.a=sgc2.a; id count task operator info Projection_6 5.00 root Column#1, Column#2, Column#3, Column#4, Column#6, Column#7, Column#8, Column#9 -└─HashRightJoin_32 5.00 root inner join, inner:TableReader_51, equal:[eq(Column#8, Column#3)] - ├─TableReader_51 1.00 root data:Selection_50 - │ └─Selection_50 1.00 cop not(isnull(Column#8)) - │ └─TableScan_49 1.00 cop table:sgc2, range:[-inf,+inf], keep order:false - └─TableReader_60 5.00 root data:Selection_59 - └─Selection_59 5.00 cop not(isnull(Column#3)) - └─TableScan_58 5.00 cop table:sgc1, range:[-inf,+inf], keep order:false +└─HashRightJoin_21 5.00 root inner join, inner:TableReader_40, equal:[eq(Column#8, Column#3)] + ├─TableReader_40 1.00 root data:Selection_39 + │ └─Selection_39 1.00 cop not(isnull(Column#8)) + │ └─TableScan_38 1.00 cop table:sgc2, range:[-inf,+inf], keep order:false + └─TableReader_49 5.00 root data:Selection_48 + └─Selection_48 5.00 cop not(isnull(Column#3)) + └─TableScan_47 5.00 cop table:sgc1, range:[-inf,+inf], keep order:false DROP TABLE IF EXISTS sgc3; CREATE TABLE sgc3 ( j JSON, diff --git a/cmd/explaintest/r/index_join.result b/cmd/explaintest/r/index_join.result index b6f8889af970a..a6b93af03854a 100644 --- a/cmd/explaintest/r/index_join.result +++ b/cmd/explaintest/r/index_join.result @@ -19,13 +19,13 @@ IndexJoin_22 5.00 root inner join, inner:IndexLookUp_21, outer key:Column#4, inn explain select * from t1 join t2 on t1.a=t2.a; id count task operator info Projection_6 5.00 root Column#1, Column#2, Column#4, Column#5 -└─HashRightJoin_31 5.00 root inner join, inner:TableReader_42, equal:[eq(Column#4, Column#1)] - ├─TableReader_42 1.00 root data:Selection_41 - │ └─Selection_41 1.00 cop not(isnull(Column#4)) - │ └─TableScan_40 1.00 cop table:t2, range:[-inf,+inf], keep order:false - └─TableReader_48 5.00 root data:Selection_47 - └─Selection_47 5.00 cop not(isnull(Column#1)) - └─TableScan_46 5.00 cop table:t1, range:[-inf,+inf], keep order:false +└─HashRightJoin_20 5.00 root inner join, inner:TableReader_31, equal:[eq(Column#4, Column#1)] + ├─TableReader_31 1.00 root data:Selection_30 + │ └─Selection_30 1.00 cop not(isnull(Column#4)) + │ └─TableScan_29 1.00 cop table:t2, range:[-inf,+inf], keep order:false + └─TableReader_37 5.00 root data:Selection_36 + └─Selection_36 5.00 cop not(isnull(Column#1)) + └─TableScan_35 5.00 cop table:t1, range:[-inf,+inf], keep order:false drop table if exists t1, t2; create table t1(a int not null, b int not null); create table t2(a int not null, b int not null, key a(a)); diff --git a/cmd/explaintest/r/topn_push_down.result b/cmd/explaintest/r/topn_push_down.result index a0bedb7d2328c..873ce0a293137 100644 --- a/cmd/explaintest/r/topn_push_down.result +++ b/cmd/explaintest/r/topn_push_down.result @@ -169,21 +169,21 @@ LIMIT 0, 5; id count task operator info Projection_13 0.00 root Column#47 └─Limit_19 0.00 root offset:0, count:5 - └─IndexJoin_116 0.00 root left outer join, inner:IndexReader_115, outer key:Column#1, inner key:Column#97 - ├─TopN_123 0.00 root 
diff --git a/cmd/explaintest/r/topn_push_down.result b/cmd/explaintest/r/topn_push_down.result
index a0bedb7d2328c..873ce0a293137 100644
--- a/cmd/explaintest/r/topn_push_down.result
+++ b/cmd/explaintest/r/topn_push_down.result
@@ -169,21 +169,21 @@ LIMIT 0, 5;
 id count task operator info
 Projection_13 0.00 root Column#47
 └─Limit_19 0.00 root offset:0, count:5
-  └─IndexJoin_116 0.00 root left outer join, inner:IndexReader_115, outer key:Column#1, inner key:Column#97
-    ├─TopN_123 0.00 root Column#47:asc, offset:0, count:5
+  └─IndexJoin_99 0.00 root left outer join, inner:IndexReader_98, outer key:Column#1, inner key:Column#97
+    ├─TopN_106 0.00 root Column#47:asc, offset:0, count:5
     │ └─IndexMergeJoin_47 0.00 root inner join, inner:IndexLookUp_45, outer key:Column#1, inner key:Column#41
-    │   ├─IndexLookUp_90 0.00 root
-    │   │ ├─Selection_88 0.00 cop eq(Column#4, 18), eq(Column#5, 1)
-    │   │ │ └─IndexScan_86 10.00 cop table:tr, index:shop_identy, trade_status, business_type, trade_pay_status, trade_type, delivery_type, source, biz_date, range:[810094178,810094178], keep order:false, stats:pseudo
-    │   │ └─Selection_89 0.00 cop eq(Column#21, 32314), eq(Column#3, 2)
-    │   │   └─TableScan_87 0.00 cop table:tr, keep order:false, stats:pseudo
+    │   ├─IndexLookUp_81 0.00 root
+    │   │ ├─Selection_79 0.00 cop eq(Column#4, 18), eq(Column#5, 1)
+    │   │ │ └─IndexScan_77 10.00 cop table:tr, index:shop_identy, trade_status, business_type, trade_pay_status, trade_type, delivery_type, source, biz_date, range:[810094178,810094178], keep order:false, stats:pseudo
+    │   │ └─Selection_80 0.00 cop eq(Column#21, 32314), eq(Column#3, 2)
+    │   │   └─TableScan_78 0.00 cop table:tr, keep order:false, stats:pseudo
     │   └─IndexLookUp_45 0.03 root
     │     ├─IndexScan_42 1.25 cop table:te, index:trade_id, range: decided by [eq(Column#41, Column#1)], keep order:true, stats:pseudo
     │     └─Selection_44 0.03 cop ge(Column#47, 2018-04-23 00:00:00.000000), le(Column#47, 2018-04-23 23:59:59.000000)
     │       └─TableScan_43 1.25 cop table:te, keep order:false, stats:pseudo
-    └─IndexReader_115 1.25 root index:Selection_114
-      └─Selection_114 1.25 cop not(isnull(Column#97))
-        └─IndexScan_113 1.25 cop table:p, index:relate_id, range: decided by [eq(Column#97, Column#1)], keep order:false, stats:pseudo
+    └─IndexReader_98 1.25 root index:Selection_97
+      └─Selection_97 1.25 cop not(isnull(Column#97))
+        └─IndexScan_96 1.25 cop table:p, index:relate_id, range: decided by [eq(Column#97, Column#1)], keep order:false, stats:pseudo
 desc select 1 as a from dual order by a limit 1;
 id count task operator info
 Projection_6 1.00 root 1
diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result
index f7afd5b27d6a8..a078a80335407 100644
--- a/cmd/explaintest/r/tpch.result
+++ b/cmd/explaintest/r/tpch.result
@@ -768,12 +768,12 @@ l_shipmode;
 id count task operator info
 Sort_9 1.00 root Column#29:asc
 └─Projection_11 1.00 root Column#24, Column#27, Column#28
-  └─HashAgg_14 1.00 root group by:Column#36, funcs:sum(Column#0), sum(Column#0), firstrow(Column#0)
-    └─Projection_49 10023369.01 root cast(case(or(eq(Column#6, "1-URGENT"), eq(Column#6, "2-HIGH")), 1, 0)), cast(case(and(ne(Column#6, "1-URGENT"), ne(Column#6, "2-HIGH")), 1, 0)), Column#24, Column#24
+  └─HashAgg_14 1.00 root group by:Column#34, funcs:sum(Column#0), sum(Column#0), firstrow(Column#0)
+    └─Projection_38 10023369.01 root cast(case(or(eq(Column#6, "1-URGENT"), eq(Column#6, "2-HIGH")), 1, 0)), cast(case(and(ne(Column#6, "1-URGENT"), ne(Column#6, "2-HIGH")), 1, 0)), Column#24, Column#24
       └─IndexMergeJoin_22 10023369.01 root inner join, inner:TableReader_20, outer key:Column#10, inner key:Column#1
-        ├─TableReader_45 10023369.01 root data:Selection_44
-        │ └─Selection_44 10023369.01 cop ge(Column#22, 1997-01-01 00:00:00.000000), in(Column#24, "RAIL", "FOB"), lt(Column#20, Column#21), lt(Column#21, Column#22), lt(Column#22, 1998-01-01)
-        │   └─TableScan_43 300005811.00 cop table:lineitem, range:[-inf,+inf], keep order:false
+        ├─TableReader_34 10023369.01 root data:Selection_33
+        │ └─Selection_33 10023369.01 cop ge(Column#22, 1997-01-01 00:00:00.000000), in(Column#24, "RAIL", "FOB"), lt(Column#20, Column#21), lt(Column#21, Column#22), lt(Column#22, 1998-01-01)
+        │   └─TableScan_32 300005811.00 cop table:lineitem, range:[-inf,+inf], keep order:false
         └─TableReader_20 1.00 root data:TableScan_19
           └─TableScan_19 1.00 cop table:orders, range: decided by [Column#10], keep order:true
 /*
@@ -930,16 +930,16 @@ id count task operator info
 Sort_13 3863988.24 root Column#28:desc, Column#25:asc, Column#26:asc, Column#27:asc
 └─Projection_15 3863988.24 root Column#10, Column#11, Column#12, Column#24
   └─HashAgg_18 3863988.24 root group by:Column#10, Column#11, Column#12, funcs:count(distinct Column#2), firstrow(Column#10), firstrow(Column#11), firstrow(Column#12)
-    └─HashLeftJoin_28 3863988.24 root anti semi join, inner:TableReader_62, equal:[eq(Column#2, Column#16)]
+    └─HashLeftJoin_28 3863988.24 root anti semi join, inner:TableReader_53, equal:[eq(Column#2, Column#16)]
       ├─IndexMergeJoin_36 4829985.30 root inner join, inner:IndexReader_34, outer key:Column#7, inner key:Column#1
-      │ ├─TableReader_55 1200618.43 root data:Selection_54
-      │ │ └─Selection_54 1200618.43 cop in(Column#12, 48, 19, 12, 4, 41, 7, 21, 39), ne(Column#10, "Brand#34"), not(like(Column#11, "LARGE BRUSHED%", 92))
-      │ │   └─TableScan_53 10000000.00 cop table:part, range:[-inf,+inf], keep order:false
+      │ ├─TableReader_46 1200618.43 root data:Selection_45
+      │ │ └─Selection_45 1200618.43 cop in(Column#12, 48, 19, 12, 4, 41, 7, 21, 39), ne(Column#10, "Brand#34"), not(like(Column#11, "LARGE BRUSHED%", 92))
+      │ │   └─TableScan_44 10000000.00 cop table:part, range:[-inf,+inf], keep order:false
       │ └─IndexReader_34 4.02 root index:IndexScan_33
       │   └─IndexScan_33 4.02 cop table:partsupp, index:PS_PARTKEY, PS_SUPPKEY, range: decided by [eq(Column#1, Column#7)], keep order:true
-      └─TableReader_62 400000.00 root data:Selection_61
-        └─Selection_61 400000.00 cop like(Column#22, "%Customer%Complaints%", 92)
-          └─TableScan_60 500000.00 cop table:supplier, range:[-inf,+inf], keep order:false
+      └─TableReader_53 400000.00 root data:Selection_52
+        └─Selection_52 400000.00 cop like(Column#22, "%Customer%Complaints%", 92)
+          └─TableScan_51 500000.00 cop table:supplier, range:[-inf,+inf], keep order:false
 /*
 Q17 Small-Quantity-Order Revenue Query
 This query determines how much average yearly revenue would be lost if orders were no longer filled for small
@@ -1158,17 +1158,17 @@ Sort_28 20000.00 root Column#48:asc
 └─Projection_52 64006.34 root Column#12, Column#13, Column#14, Column#18, mul(0.5, Column#45)
   └─Selection_53 64006.34 root gt(cast(Column#14), mul(0.5, Column#45))
     └─HashAgg_56 80007.93 root group by:Column#12, Column#13, funcs:firstrow(Column#12), firstrow(Column#13), firstrow(Column#14), firstrow(Column#18), sum(Column#32)
-      └─HashLeftJoin_59 9711455.06 root left outer join, inner:TableReader_95, equal:[eq(Column#12, Column#29) eq(Column#13, Column#30)]
+      └─HashLeftJoin_59 9711455.06 root left outer join, inner:TableReader_86, equal:[eq(Column#12, Column#29) eq(Column#13, Column#30)]
         ├─IndexMergeJoin_69 321865.05 root inner join, inner:IndexLookUp_67, outer key:Column#18, inner key:Column#12
-        │ ├─TableReader_90 80007.93 root data:Selection_89
-        │ │ └─Selection_89 80007.93 cop like(Column#19, "green%", 92)
-        │ │   └─TableScan_88 10000000.00 cop table:part, range:[-inf,+inf], keep order:false
+        │ ├─TableReader_81 80007.93 root data:Selection_80
+        │ │ └─Selection_80 80007.93 cop like(Column#19, "green%", 92)
+        │ │   └─TableScan_79 10000000.00 cop table:part, range:[-inf,+inf], keep order:false
         │ └─IndexLookUp_67 4.02 root
         │   ├─IndexScan_65 4.02 cop table:partsupp, index:PS_PARTKEY, PS_SUPPKEY, range: decided by [eq(Column#12, Column#18)], keep order:true
         │   └─TableScan_66 4.02 cop table:partsupp, keep order:false
-        └─TableReader_95 44189356.65 root data:Selection_94
-          └─Selection_94 44189356.65 cop ge(Column#38, 1993-01-01 00:00:00.000000), lt(Column#38, 1994-01-01)
-            └─TableScan_93 300005811.00 cop table:lineitem, range:[-inf,+inf], keep order:false
+        └─TableReader_86 44189356.65 root data:Selection_85
+          └─Selection_85 44189356.65 cop ge(Column#38, 1993-01-01 00:00:00.000000), lt(Column#38, 1994-01-01)
+            └─TableScan_84 300005811.00 cop table:lineitem, range:[-inf,+inf], keep order:false
 /*
 Q21 Suppliers Who Kept Orders Waiting Query
 This query identifies certain suppliers who were not able to ship required parts in a timely manner.
diff --git a/cmd/explaintest/t/explain-non-select-stmt.test b/cmd/explaintest/t/explain-non-select-stmt.test
new file mode 100644
index 0000000000000..f231b95a8600e
--- /dev/null
+++ b/cmd/explaintest/t/explain-non-select-stmt.test
@@ -0,0 +1,8 @@
+use test;
+drop table if exists t;
+create table t(a bigint, b bigint);
+explain insert into t values(1, 1);
+explain insert into t select * from t;
+explain delete from t where a > 100;
+explain update t set b = 100 where a = 200;
+explain replace into t select a, 100 from t;
diff --git a/cmd/explaintest/t/explain_indexmerge.test b/cmd/explaintest/t/explain_indexmerge.test
index 20a6afb3ef88c..507e7559a0979 100644
--- a/cmd/explaintest/t/explain_indexmerge.test
+++ b/cmd/explaintest/t/explain_indexmerge.test
@@ -12,3 +12,4 @@ explain select * from t where b < 50 or c < 50;
 explain select * from t where b < 50 or c < 5000000;
 explain select * from t where a < 50 or b < 50 or c < 50;
 explain select * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
+explain format="dot" select * from t where (a < 50 or b < 50) and f > 100;
diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go
index 0c8e8e435903b..fc9db586a6a69 100644
--- a/ddl/db_change_test.go
+++ b/ddl/db_change_test.go
@@ -754,6 +754,16 @@ func (s *testStateChangeSuite) TestParallelDropColumn(c *C) {
 	s.testControlParallelExecSQL(c, sql, sql, f)
 }
 
+func (s *testStateChangeSuite) TestParallelDropIndex(c *C) {
+	sql1 := "alter table t drop index idx1 ;"
+	sql2 := "alter table t drop index idx2 ;"
+	f := func(c *C, err1, err2 error) {
+		c.Assert(err1, IsNil)
+		c.Assert(err2.Error(), Equals, "[autoid:1075]Incorrect table definition; there can be only one auto column and it must be defined as a key")
+	}
+	s.testControlParallelExecSQL(c, sql1, sql2, f)
+}
+
 func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) {
 	sql1 := "create table t_exists(c int);"
 	sql2 := "alter table t rename to t_exists;"
@@ -770,7 +780,7 @@ type checkRet func(c *C, err1, err2 error)
 func (s *testStateChangeSuite) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) {
 	_, err := s.se.Execute(context.Background(), "use test_db_state")
 	c.Assert(err, IsNil)
	_, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int)")
+	_, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment, e int, index idx1(d), index idx2(d,e))")
 	c.Assert(err, IsNil)
 	defer s.se.Execute(context.Background(), "drop table t")
diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index 20d0d1a67f7f0..d09ca0746b522 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -3396,11 +3396,10 @@ func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CI
 		return err
 	}
 
-	cols := t.Cols()
-	for _, idxCol := range indexInfo.Columns {
-		if mysql.HasAutoIncrementFlag(cols[idxCol.Offset].Flag) {
-			return autoid.ErrWrongAutoKey
-		}
+	// Check for drop index on the auto_increment column.
+	err = checkDropIndexOnAutoIncrementColumn(t.Meta(), indexInfo)
+	if err != nil {
+		return errors.Trace(err)
 	}
 
 	job := &model.Job{
diff --git a/ddl/index.go b/ddl/index.go
index 9316d053254b5..e6aaeee58a060 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -30,6 +30,7 @@ import (
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta"
+	"github.com/pingcap/tidb/meta/autoid"
 	"github.com/pingcap/tidb/metrics"
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/sessionctx/variable"
@@ -465,9 +466,40 @@ func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.Inde
 		job.State = model.JobStateCancelled
 		return nil, nil, ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName)
 	}
+
+	// Double check for drop index on the auto_increment column.
+	err = checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo)
+	if err != nil {
+		job.State = model.JobStateCancelled
+		return nil, nil, autoid.ErrWrongAutoKey
+	}
+
 	return tblInfo, indexInfo, nil
 }
 
+func checkDropIndexOnAutoIncrementColumn(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error {
+	cols := tblInfo.Columns
+	for _, idxCol := range indexInfo.Columns {
+		if !mysql.HasAutoIncrementFlag(cols[idxCol.Offset].Flag) {
+			continue
+		}
+		// Check the count of indexes on the auto_increment column.
+		count := 0
+		for _, idx := range tblInfo.Indices {
+			for _, c := range idx.Columns {
+				if c.Name.L == idxCol.Name.L {
+					count++
+					break
+				}
+			}
+		}
+		if count < 2 {
+			return autoid.ErrWrongAutoKey
+		}
+	}
+	return nil
+}
+
 func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIStr, model.CIStr, error) {
 	var from, to model.CIStr
 	schemaID := job.SchemaID
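For context, the rule the new checkDropIndexOnAutoIncrementColumn enforces — an index covering an auto_increment column may only be dropped while at least one other index still covers that column — can be sketched with simplified stand-in types. Everything below (the index struct, canDropIndex, the column names) is illustrative, not TiDB's actual API:

package main

import "fmt"

// index is a simplified stand-in for model.IndexInfo: just the column
// names the index covers.
type index struct {
	name string
	cols []string
}

// canDropIndex mirrors the count-based check: dropping target is allowed
// only if every auto_increment column it covers is covered by at least
// one other index (i.e. the covering count is >= 2 before the drop).
func canDropIndex(indices []index, target index, autoIncCol string) bool {
	for _, col := range target.cols {
		if col != autoIncCol {
			continue
		}
		count := 0
		for _, idx := range indices {
			for _, c := range idx.cols {
				if c == col {
					count++
					break
				}
			}
		}
		if count < 2 { // target itself is the only index left on the column
			return false
		}
	}
	return true
}

func main() {
	indices := []index{
		{name: "idx1", cols: []string{"d"}},
		{name: "idx2", cols: []string{"d", "e"}},
	}
	// Either index alone may be dropped: the other still covers "d".
	fmt.Println(canDropIndex(indices, indices[0], "d")) // true
	// Once idx2 is gone, idx1 is the last index on "d" and must stay.
	fmt.Println(canDropIndex(indices[:1], indices[0], "d")) // false
}

This is also why the check runs twice (in DropIndex and again in checkDropIndex): two concurrent DROP INDEX jobs, as in TestParallelDropIndex above, can each pass the first check, so the DDL worker re-validates against the latest table info.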
diff --git a/ddl/table.go b/ddl/table.go
index 274bba04086dc..9294ada250bba 100644
--- a/ddl/table.go
+++ b/ddl/table.go
@@ -296,7 +296,7 @@ func (w *worker) onRecoverTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in
 
 // mockRecoverTableCommitErrOnce uses to make sure
 // `mockRecoverTableCommitErr` only mock error once.
-var mockRecoverTableCommitErrOnce uint32 = 0
+var mockRecoverTableCommitErrOnce uint32
 
 func enableGC(w *worker) error {
 	ctx, err := w.sessPool.get()
diff --git a/executor/adapter.go b/executor/adapter.go
index 3238865cdba6a..a656d5fe78338 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -47,6 +47,7 @@ import (
 	"github.com/pingcap/tidb/util/memory"
 	"github.com/pingcap/tidb/util/sqlexec"
 	"github.com/pingcap/tidb/util/stmtsummary"
+	"github.com/pingcap/tidb/util/stringutil"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 )
@@ -180,7 +181,8 @@ func (a *recordSet) Close() error {
 	err := a.executor.Close()
 	a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil)
 	sessVars := a.stmt.Ctx.GetSessionVars()
-	sessVars.PrevStmt = FormatSQL(a.stmt.OriginText(), sessVars)
+	pps := types.CloneRow(sessVars.PreparedParams)
+	sessVars.PrevStmt = FormatSQL(a.stmt.OriginText(), pps)
 	a.stmt.logAudit()
 	a.stmt.SummaryStmt()
 	return err
@@ -731,13 +733,15 @@ func (a *ExecStmt) logAudit() {
 }
 
 // FormatSQL is used to format the original SQL, e.g. truncating long SQL, appending prepared arguments.
-func FormatSQL(sql string, sessVars *variable.SessionVars) string {
-	cfg := config.GetGlobalConfig()
-	length := len(sql)
-	if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(length) > maxQueryLen {
-		sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, length)
+func FormatSQL(sql string, pps variable.PreparedParams) stringutil.StringerFunc {
+	return func() string {
+		cfg := config.GetGlobalConfig()
+		length := len(sql)
+		if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(length) > maxQueryLen {
+			sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, length)
+		}
+		return QueryReplacer.Replace(sql) + pps.String()
 	}
-	return QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo()
 }
 
 // LogSlowQuery is used to print the slow query in the log files.
@@ -753,7 +757,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 	if costTime < threshold && level > zapcore.DebugLevel {
 		return
 	}
-	sql := FormatSQL(a.Text, sessVars)
+	sql := FormatSQL(a.Text, sessVars.PreparedParams)
 
 	var tableIDs, indexNames string
 	if len(sessVars.StmtCtx.TableIDs) > 0 {
@@ -769,7 +773,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 	_, digest := sessVars.StmtCtx.SQLDigest()
 	slowItems := &variable.SlowQueryLogItems{
 		TxnTS:     txnTS,
-		SQL:       sql,
+		SQL:       sql.String(),
 		Digest:    digest,
 		TimeTotal: costTime,
 		TimeParse: a.Ctx.GetSessionVars().DurationParse,
@@ -782,7 +786,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 		Succ:      succ,
 	}
 	if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
-		slowItems.PrevStmt = sessVars.PrevStmt
+		slowItems.PrevStmt = sessVars.PrevStmt.String()
 	}
 	if costTime < threshold {
 		logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(slowItems))
@@ -796,7 +800,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 		userString = sessVars.User.String()
 	}
 	domain.GetDomain(a.Ctx).LogSlowQuery(&domain.SlowQueryInfo{
-		SQL:      sql,
+		SQL:      sql.String(),
 		Digest:   digest,
 		Start:    a.Ctx.GetSessionVars().StartTime,
 		Duration: costTime,
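The FormatSQL change above swaps an eagerly built string for a closure, so truncation and replacement only run if a log line is actually emitted. A minimal sketch of that lazy-Stringer pattern — StringerFunc is re-declared locally here for self-containment (TiDB's real one lives in util/stringutil), and formatSQL is a simplified analogue, not the actual function:

package main

import (
	"fmt"
	"strings"
)

// StringerFunc lets a closure satisfy fmt.Stringer: the body runs only
// when String() is called, e.g. when the slow-query log really fires.
type StringerFunc func() string

func (f StringerFunc) String() string { return f() }

// formatSQL captures the SQL text and defers all formatting work.
func formatSQL(sql string, maxLen int) StringerFunc {
	return func() string {
		if len(sql) > maxLen {
			// "%.*q" takes the precision (maxLen) from the argument list.
			return fmt.Sprintf("%.*q(len:%d)", maxLen, sql, len(sql))
		}
		return strings.Replace(sql, "\n", " ", -1)
	}
}

func main() {
	s := formatSQL("select *\nfrom t", 1024)
	// No formatting has happened yet; it happens inside Println via String().
	fmt.Println(s)
}

This also explains the recordSet.Close change: PreparedParams are cloned before being captured, because the closure may be evaluated later, after the session has reused the parameter slots.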
diff --git a/executor/builder.go b/executor/builder.go
index 53fd43c2b3582..81e7d589d3fe2 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -833,7 +833,7 @@ func (b *executorBuilder) buildExplain(v *plannercore.Explain) Executor {
 	}
 	if v.Analyze {
 		b.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl = execdetails.NewRuntimeStatsColl()
-		explainExec.analyzeExec = b.build(v.ExecPlan)
+		explainExec.analyzeExec = b.build(v.TargetPlan)
 	}
 	return explainExec
 }
@@ -1996,6 +1996,7 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn
 		colLens:      is.IdxColLens,
 		idxPlans:     v.IndexPlans,
 		tblPlans:     v.TablePlans,
+		PushedLimit:  v.PushedLimit,
 	}
 
 	if containsLimit(indexReq.Executors) {
diff --git a/executor/distsql.go b/executor/distsql.go
index 2f14dd7f2d788..c162a5e8cf6eb 100644
--- a/executor/distsql.go
+++ b/executor/distsql.go
@@ -355,6 +355,8 @@ type IndexLookUpExecutor struct {
 	corColInAccess bool
 	idxCols        []*expression.Column
 	colLens        []int
+	// PushedLimit is used to skip the preceding and trailing handles when Limit is sunk into IndexLookUpReader.
+	PushedLimit *plannercore.PushedDownLimit
 }
 
 type checkIndexValue struct {
@@ -467,6 +469,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, kvRanges []k
 		checkIndexValue: e.checkIndexValue,
 		maxBatchSize:    e.ctx.GetSessionVars().IndexLookupSize,
 		maxChunkSize:    e.maxChunkSize,
+		PushedLimit:     e.PushedLimit,
 	}
 	if worker.batchSize > worker.maxBatchSize {
 		worker.batchSize = worker.maxBatchSize
@@ -484,9 +487,9 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, kvRanges []k
 		}
 		if e.runtimeStats != nil {
 			copStats := e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(e.idxPlans[len(e.idxPlans)-1].ExplainID().String())
-			copStats.SetRowNum(count)
+			copStats.SetRowNum(int64(count))
 			copStats = e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(e.tblPlans[0].ExplainID().String())
-			copStats.SetRowNum(count)
+			copStats.SetRowNum(int64(count))
 		}
 		e.ctx.StoreQueryFeedback(e.feedback)
 		close(workCh)
@@ -624,12 +627,14 @@ type indexWorker struct {
 
 	// checkIndexValue is used to check the consistency of the index data.
 	*checkIndexValue
+	// PushedLimit is used to skip the preceding and trailing handles when Limit is sunk into IndexLookUpReader.
+	PushedLimit *plannercore.PushedDownLimit
 }
 
 // fetchHandles fetches a batch of handles from index data and builds the index lookup tasks.
 // The tasks are sent to workCh to be further processed by tableWorker, and sent to e.resultCh
 // at the same time to keep data ordered.
-func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectResult) (count int64, err error) {
+func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectResult) (count uint64, err error) {
 	defer func() {
 		if r := recover(); r != nil {
 			buf := make([]byte, 4096)
@@ -654,7 +659,7 @@ func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectRes
 		chk = chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, w.idxLookup.maxChunkSize)
 	}
 	for {
-		handles, retChunk, err := w.extractTaskHandles(ctx, chk, result)
+		handles, retChunk, scannedKeys, err := w.extractTaskHandles(ctx, chk, result, count)
 		if err != nil {
 			doneCh := make(chan error, 1)
 			doneCh <- err
@@ -663,10 +668,10 @@ func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectRes
 			}
 			return count, err
 		}
+		count += scannedKeys
 		if len(handles) == 0 {
 			return count, nil
 		}
-		count += int64(len(handles))
 		task := w.buildTableTask(handles, retChunk)
 		select {
 		case <-ctx.Done():
@@ -679,20 +684,43 @@ func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectRes
 	}
 }
 
-func (w *indexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult) (
-	handles []int64, retChk *chunk.Chunk, err error) {
+func (w *indexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult, count uint64) (
+	handles []int64, retChk *chunk.Chunk, scannedKeys uint64, err error) {
 	handleOffset := chk.NumCols() - 1
 	handles = make([]int64, 0, w.batchSize)
+	// PushedLimit would always be nil for CheckIndex or CheckTable; we add this check just for insurance.
+	checkLimit := (w.PushedLimit != nil) && (w.checkIndexValue == nil)
 	for len(handles) < w.batchSize {
-		chk.SetRequiredRows(w.batchSize-len(handles), w.maxChunkSize)
+		requiredRows := w.batchSize - len(handles)
+		if checkLimit {
+			if w.PushedLimit.Offset+w.PushedLimit.Count <= scannedKeys+count {
+				return handles, nil, scannedKeys, nil
+			}
+			leftCnt := w.PushedLimit.Offset + w.PushedLimit.Count - scannedKeys - count
+			if uint64(requiredRows) > leftCnt {
+				requiredRows = int(leftCnt)
+			}
+		}
+		chk.SetRequiredRows(requiredRows, w.maxChunkSize)
 		err = errors.Trace(idxResult.Next(ctx, chk))
 		if err != nil {
-			return handles, nil, err
+			return handles, nil, scannedKeys, err
 		}
 		if chk.NumRows() == 0 {
-			return handles, retChk, nil
+			return handles, retChk, scannedKeys, nil
 		}
 		for i := 0; i < chk.NumRows(); i++ {
+			scannedKeys++
+			if checkLimit {
+				if (count + scannedKeys) <= w.PushedLimit.Offset {
+					// Skip the preceding Offset handles.
+					continue
+				}
+				if (count + scannedKeys) > (w.PushedLimit.Offset + w.PushedLimit.Count) {
+					// Skip the handles after Offset+Count.
+					return handles, nil, scannedKeys, nil
+				}
+			}
 			h := chk.GetRow(i).GetInt64(handleOffset)
 			handles = append(handles, h)
 		}
@@ -707,7 +735,7 @@ func (w *indexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk,
 	if w.batchSize > w.maxBatchSize {
 		w.batchSize = w.maxBatchSize
 	}
-	return handles, retChk, nil
+	return handles, retChk, scannedKeys, nil
}
 
 func (w *indexWorker) buildTableTask(handles []int64, retChk *chunk.Chunk) *lookupTableTask {
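The skip arithmetic in extractTaskHandles is easiest to see with concrete numbers. Below is a toy walk-through of just that filter, assuming a pushed-down LIMIT 2,1 (Offset=2, Count=1) over five scanned index keys; pushedDownLimit and keepHandle are simplified stand-ins for the real types:

package main

import "fmt"

// pushedDownLimit mirrors the shape of plannercore.PushedDownLimit.
type pushedDownLimit struct {
	Offset uint64
	Count  uint64
}

// keepHandle reproduces the per-key decision in extractTaskHandles:
// count is the number of keys scanned by previous batches, scanned is
// the 1-based position within the current batch. A handle is kept only
// when its global position lies in (Offset, Offset+Count].
func keepHandle(limit pushedDownLimit, count, scanned uint64) bool {
	pos := count + scanned
	return pos > limit.Offset && pos <= limit.Offset+limit.Count
}

func main() {
	limit := pushedDownLimit{Offset: 2, Count: 1}
	for pos := uint64(1); pos <= 5; pos++ {
		// Only key 3 is kept: keys 1-2 fall in the offset, 4-5 past the count.
		fmt.Printf("key %d kept: %v\n", pos, keepHandle(limit, 0, pos))
	}
}

Returning scannedKeys (rather than len(handles)) keeps the running count accurate even when every key in a batch was skipped, which is what lets the next batch shrink requiredRows and stop the index scan early.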
diff --git a/executor/distsql_test.go b/executor/distsql_test.go
index 6ddf318d7f834..a4fcb5d877305 100644
--- a/executor/distsql_test.go
+++ b/executor/distsql_test.go
@@ -238,3 +238,18 @@ func (s *testSuite3) TestInconsistentIndex(c *C) {
 		c.Assert(err, IsNil)
 	}
 }
+
+func (s *testSuite3) TestPushLimitDownIndexLookUpReader(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists tbl")
+	tk.MustExec("create table tbl(a int, b int, c int, key idx_b_c(b,c))")
+	tk.MustExec("insert into tbl values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5)")
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 limit 2,1").Check(testkit.Rows("4 4 4"))
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 4 limit 2,1").Check(testkit.Rows())
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 3 limit 2,1").Check(testkit.Rows())
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 2 limit 2,1").Check(testkit.Rows("5 5 5"))
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 limit 1").Check(testkit.Rows("2 2 2"))
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1").Check(testkit.Rows("3 3 3"))
+	tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1").Check(testkit.Rows("4 4 4"))
+}
diff --git a/executor/executor.go b/executor/executor.go
index 992d2dae7a20e..7aa3037300f6a 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -1385,52 +1385,56 @@ func extractStmtHintsFromStmtNode(stmtNode ast.StmtNode) []*ast.TableOptimizerHi
 }
 
 func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHints, warns []error) {
-	var memoryQuotaHintList, noIndexMergeHintList, useToJAHintList, readReplicaHintList []*ast.TableOptimizerHint
+	if len(hints) == 0 {
+		return
+	}
+	var memoryQuotaHint, useToJAHint *ast.TableOptimizerHint
+	var memoryQuotaHintCnt, useToJAHintCnt, noIndexMergeHintCnt, readReplicaHintCnt int
 	for _, hint := range hints {
 		switch hint.HintName.L {
 		case "memory_quota":
-			memoryQuotaHintList = append(memoryQuotaHintList, hint)
-		case "no_index_merge":
-			noIndexMergeHintList = append(noIndexMergeHintList, hint)
+			memoryQuotaHint = hint
+			memoryQuotaHintCnt++
 		case "use_toja":
-			useToJAHintList = append(useToJAHintList, hint)
+			useToJAHint = hint
+			useToJAHintCnt++
+		case "no_index_merge":
+			noIndexMergeHintCnt++
 		case "read_consistent_replica":
-			readReplicaHintList = append(readReplicaHintList, hint)
+			readReplicaHintCnt++
 		}
 	}
 	// Handle MEMORY_QUOTA
-	if len(memoryQuotaHintList) != 0 {
-		if len(memoryQuotaHintList) > 1 {
+	if memoryQuotaHintCnt != 0 {
+		if memoryQuotaHintCnt > 1 {
 			warn := errors.New("There are multiple MEMORY_QUOTA hints, only the last one will take effect")
 			warns = append(warns, warn)
 		}
-		hint := memoryQuotaHintList[len(memoryQuotaHintList)-1]
 		// Executor use MemoryQuota <= 0 to indicate no memory limit, here use < 0 to handle hint syntax error.
-		if hint.MemoryQuota < 0 {
+		if memoryQuotaHint.MemoryQuota < 0 {
 			warn := errors.New("The use of MEMORY_QUOTA hint is invalid, valid usage: MEMORY_QUOTA(10 MB) or MEMORY_QUOTA(10 GB)")
 			warns = append(warns, warn)
 		} else {
 			stmtHints.HasMemQuotaHint = true
-			stmtHints.MemQuotaQuery = hint.MemoryQuota
-			if hint.MemoryQuota == 0 {
+			stmtHints.MemQuotaQuery = memoryQuotaHint.MemoryQuota
+			if memoryQuotaHint.MemoryQuota == 0 {
 				warn := errors.New("Setting the MEMORY_QUOTA to 0 means no memory limit")
 				warns = append(warns, warn)
 			}
 		}
 	}
 	// Handle USE_TOJA
-	if len(useToJAHintList) != 0 {
-		if len(useToJAHintList) > 1 {
+	if useToJAHintCnt != 0 {
+		if useToJAHintCnt > 1 {
 			warn := errors.New("There are multiple USE_TOJA hints, only the last one will take effect")
 			warns = append(warns, warn)
 		}
-		hint := useToJAHintList[len(useToJAHintList)-1]
 		stmtHints.HasAllowInSubqToJoinAndAggHint = true
-		stmtHints.AllowInSubqToJoinAndAgg = hint.HintFlag
+		stmtHints.AllowInSubqToJoinAndAgg = useToJAHint.HintFlag
 	}
 	// Handle NO_INDEX_MERGE
-	if len(noIndexMergeHintList) != 0 {
-		if len(noIndexMergeHintList) > 1 {
+	if noIndexMergeHintCnt != 0 {
+		if noIndexMergeHintCnt > 1 {
 			warn := errors.New("There are multiple NO_INDEX_MERGE hints, only the last one will take effect")
 			warns = append(warns, warn)
 		}
@@ -1438,8 +1442,8 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin
 		stmtHints.EnableIndexMerge = false
 	}
 	// Handle READ_CONSISTENT_REPLICA
-	if len(readReplicaHintList) != 0 {
-		if len(readReplicaHintList) > 1 {
+	if readReplicaHintCnt != 0 {
+		if readReplicaHintCnt > 1 {
 			warn := errors.New("There are multiple READ_CONSISTENT_REPLICA hints, only the last one will take effect")
 			warns = append(warns, warn)
 		}
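The refactor above replaces per-hint slices with "last hint + counter" state: overwriting on each occurrence makes the final assignment the winner, and the counter alone is enough to warn about duplicates. A minimal sketch of the pattern with a hypothetical hint type (not TiDB's ast.TableOptimizerHint):

package main

import "fmt"

// hint is a simplified stand-in carrying only what the sketch needs.
type hint struct {
	name        string
	memoryQuota int64
}

// lastHintWins keeps the most recent matching hint plus a counter,
// instead of accumulating a slice and indexing its last element.
func lastHintWins(hints []hint, name string) (last *hint, warn string) {
	cnt := 0
	for i := range hints {
		if hints[i].name == name {
			last = &hints[i]
			cnt++
		}
	}
	if cnt > 1 {
		warn = fmt.Sprintf("There are multiple %s hints, only the last one will take effect", name)
	}
	return last, warn
}

func main() {
	hints := []hint{
		{name: "memory_quota", memoryQuota: 10 << 20},
		{name: "memory_quota", memoryQuota: 1 << 30},
	}
	last, warn := lastHintWins(hints, "memory_quota")
	fmt.Println(last.memoryQuota) // 1073741824: the later hint wins
	fmt.Println(warn)
}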
&plannercore.CachedPrepareStmt{ + PreparedAst: prepared, + VisitInfos: destBuilder.GetVisitInfo(), + } + return vars.AddPreparedStmt(e.ID, preparedObj) } // ExecuteExec represents an EXECUTE executor. @@ -258,10 +266,16 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { if !ok { return errors.Trace(plannercore.ErrStmtNotFound) } + preparedPointer := vars.PreparedStmts[id] + preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if !ok { + return errors.Errorf("invalid CachedPrepareStmt type") + } + prepared := preparedObj.PreparedAst delete(vars.PreparedStmtNameToID, e.Name) if plannercore.PreparedPlanCacheEnabled() { e.ctx.PreparedPlanCache().Delete(plannercore.NewPSTMTPlanCacheKey( - vars, id, vars.PreparedStmts[id].SchemaVersion, + vars, id, prepared.SchemaVersion, )) } vars.RemovePreparedStmt(id) @@ -293,8 +307,12 @@ func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, Ctx: sctx, OutputNames: execPlan.OutputNames(), } - if prepared, ok := sctx.GetSessionVars().PreparedStmts[ID]; ok { - stmt.Text = prepared.Stmt.Text() + if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[ID]; ok { + preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if !ok { + return nil, errors.Errorf("invalid CachedPrepareStmt type") + } + stmt.Text = preparedObj.PreparedAst.Stmt.Text() sctx.GetSessionVars().StmtCtx.OriginalSQL = stmt.Text } return stmt, nil @@ -308,8 +326,12 @@ func getPreparedStmt(stmt *ast.ExecuteStmt, vars *variable.SessionVars) (ast.Stm return nil, plannercore.ErrStmtNotFound } } - if prepared, ok := vars.PreparedStmts[execID]; ok { - return prepared.Stmt, nil + if preparedPointer, ok := vars.PreparedStmts[execID]; ok { + preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if !ok { + return nil, errors.Errorf("invalid CachedPrepareStmt type") + } + return preparedObj.PreparedAst.Stmt, nil } return nil, plannercore.ErrStmtNotFound } diff --git a/executor/trace.go b/executor/trace.go index 91acb341928c5..41744820642b7 100644 --- a/executor/trace.go +++ b/executor/trace.go @@ -17,6 +17,7 @@ import ( "context" "encoding/json" "fmt" + "sort" "time" "github.com/opentracing/basictracer-go" @@ -199,6 +200,18 @@ func dfsTree(t *appdash.Trace, prefix string, isLast bool, chk *chunk.Chunk) { chk.AppendString(1, start.Format("15:04:05.000000")) chk.AppendString(2, duration.String()) + // Sort events by their start time + sort.Slice(t.Sub, func(i, j int) bool { + var istart, jstart time.Time + if ievent, err := t.Sub[i].TimespanEvent(); err == nil { + istart = ievent.Start() + } + if jevent, err := t.Sub[j].TimespanEvent(); err == nil { + jstart = jevent.Start() + } + return istart.Before(jstart) + }) + for i, sp := range t.Sub { dfsTree(sp, newPrefix, i == (len(t.Sub))-1 /*last element of array*/, chk) } diff --git a/executor/trace_test.go b/executor/trace_test.go index fb2edfa687958..70bdcc35aa056 100644 --- a/executor/trace_test.go +++ b/executor/trace_test.go @@ -40,11 +40,28 @@ func (s *testSuite1) TestTraceExec(c *C) { // +---------------------------+-----------------+------------+ rows = tk.MustQuery("trace format='row' select * from trace where id = 0;").Rows() c.Assert(len(rows) > 1, IsTrue) + c.Assert(rowsOrdered(rows), IsTrue) rows = tk.MustQuery("trace format='row' delete from trace where id = 0").Rows() c.Assert(len(rows) > 1, IsTrue) + c.Assert(rowsOrdered(rows), IsTrue) tk.MustExec("trace format='log' insert into trace (c1, c2, c3) values (1, 2, 3)") rows = tk.MustQuery("trace 
diff --git a/executor/trace_test.go b/executor/trace_test.go
index fb2edfa687958..70bdcc35aa056 100644
--- a/executor/trace_test.go
+++ b/executor/trace_test.go
@@ -40,11 +40,28 @@ func (s *testSuite1) TestTraceExec(c *C) {
 	// +---------------------------+-----------------+------------+
 	rows = tk.MustQuery("trace format='row' select * from trace where id = 0;").Rows()
 	c.Assert(len(rows) > 1, IsTrue)
+	c.Assert(rowsOrdered(rows), IsTrue)
 
 	rows = tk.MustQuery("trace format='row' delete from trace where id = 0").Rows()
 	c.Assert(len(rows) > 1, IsTrue)
+	c.Assert(rowsOrdered(rows), IsTrue)
 
 	tk.MustExec("trace format='log' insert into trace (c1, c2, c3) values (1, 2, 3)")
 	rows = tk.MustQuery("trace format='log' select * from trace where id = 0;").Rows()
 	c.Assert(len(rows), GreaterEqual, 1)
 }
+
+func rowsOrdered(rows [][]interface{}) bool {
+	for idx := range rows {
+		if _, ok := rows[idx][1].(string); !ok {
+			return false
+		}
+		if idx == 0 {
+			continue
+		}
+		if rows[idx-1][1].(string) > rows[idx][1].(string) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/executor/write.go b/executor/write.go
index ca02907ae296e..cc2b05409385c 100644
--- a/executor/write.go
+++ b/executor/write.go
@@ -21,7 +21,6 @@ import (
 	"github.com/pingcap/parser/ast"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/expression"
-	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/table"
 	"github.com/pingcap/tidb/table/tables"
@@ -157,22 +156,7 @@ func updateRecord(ctx context.Context, sctx sessionctx.Context, h int64, oldData
 	if sc.DupKeyAsWarning {
 		newHandle, err = t.AddRecord(sctx, newData, table.IsUpdate, table.SkipHandleCheck, table.WithCtx(ctx))
 	} else {
-		txn, err1 := sctx.Txn(true)
-		if err1 != nil {
-			return false, false, 0, err1
-		}
-		// If there are primary keys or unique indices, we have to check TiKV to ensure their uniqueness.
-		// The PresumeKeyNotExists option could delay the check to improve performance.
-		sessVars := sctx.GetSessionVars()
-		if !sessVars.ConstraintCheckInPlace {
-			// The purpose of adding the Autocommit and InTxn conditions here is for compatibility (older version TiDB behaviour).
-			// Remove the check should not affect correctness.
-			if sessVars.IsAutocommit() && !sessVars.InTxn() {
-				txn.SetOption(kv.PresumeKeyNotExists, nil)
-			}
-		}
 		newHandle, err = t.AddRecord(sctx, newData, table.IsUpdate, table.WithCtx(ctx))
-		txn.DelOption(kv.PresumeKeyNotExists)
 	}
 
 	if err != nil {
diff --git a/executor/write_test.go b/executor/write_test.go
index 0bf79e9d36545..1a4777a68c8fa 100644
--- a/executor/write_test.go
+++ b/executor/write_test.go
@@ -2451,11 +2451,17 @@ func (s *testSuite4) TestRebaseIfNeeded(c *C) {
 func (s *testSuite4) TestDeferConstraintCheckForInsert(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
 	tk.MustExec(`use test`)
+
+	tk.MustExec(`drop table if exists t;create table t (a int primary key, b int);`)
+	tk.MustExec(`insert into t values (1,2),(2,2)`)
+	_, err := tk.Exec("update t set a=a+1 where b=2")
+	c.Assert(err, NotNil)
+
 	tk.MustExec(`drop table if exists t;create table t (i int key);`)
 	tk.MustExec(`insert t values (1);`)
 	tk.MustExec(`set tidb_constraint_check_in_place = 1;`)
 	tk.MustExec(`begin;`)
-	_, err := tk.Exec(`insert t values (1);`)
+	_, err = tk.Exec(`insert t values (1);`)
 	c.Assert(err, NotNil)
 	tk.MustExec(`update t set i = 2 where i = 1;`)
 	tk.MustExec(`commit;`)
diff --git a/expression/bench_test.go b/expression/bench_test.go
index abd3eb5adb7b6..dca165e0e58ce 100644
--- a/expression/bench_test.go
+++ b/expression/bench_test.go
@@ -230,11 +230,14 @@ func (g *defaultGener) gen() interface{} {
 		}
 		return d
 	case types.ETDatetime, types.ETTimestamp:
-		gt := types.FromDate(rand.Intn(2200), rand.Intn(10)+1, rand.Intn(20)+1, rand.Intn(12), rand.Intn(60), rand.Intn(60), rand.Intn(1000))
+		gt := types.FromDate(rand.Intn(2200), rand.Intn(10)+1, rand.Intn(20)+1, rand.Intn(12), rand.Intn(60), rand.Intn(60), rand.Intn(1000000))
 		t := types.Time{Time: gt, Type: convertETType(g.eType)}
 		return t
 	case types.ETDuration:
-		d := types.Duration{Duration: time.Duration(rand.Int())}
+		d := types.Duration{
+			// use rand.Int31() so the value does not overflow in AddDuration
+			Duration: time.Duration(rand.Int31()),
+		}
 		return d
 	case types.ETJson:
 		j := new(json.BinaryJSON)
@@ -248,6 +251,25 @@ func (g *defaultGener) gen() interface{} {
 	return nil
 }
 
+// rangeRealGener is used to generate float64 items in [begin, end].
+type rangeRealGener struct {
+	begin float64
+	end   float64
+
+	nullRation float64
+}
+
+func (g *rangeRealGener) gen() interface{} {
+	if rand.Float64() < g.nullRation {
+		return nil
+	}
+	if g.end <= g.begin {
+		g.begin = -100
+		g.end = 100
+	}
+	return rand.Float64()*(g.end-g.begin) + g.begin
+}
+
 // rangeInt64Gener is used to generate int64 items in [begin, end).
 type rangeInt64Gener struct {
 	begin int
@@ -296,12 +318,21 @@ func (g *randDurInt) gen() interface{} {
 }
 
 type vecExprBenchCase struct {
-	retEvalType   types.EvalType
+	// retEvalType is the EvalType of the expression result.
+	// This field is required.
+	retEvalType types.EvalType
+	// childrenTypes is the EvalTypes of the expression children(arguments).
+	// This field is required.
 	childrenTypes []types.EvalType
+	// childrenFieldTypes is the field types of the expression children(arguments).
+	// If childrenFieldTypes is not set, it will be converted from childrenTypes.
+	// This field is optional.
+	childrenFieldTypes []*types.FieldType
 	// geners are used to generate data for children and geners[i] generates data for children[i].
 	// If geners[i] is nil, the default dataGenerator will be used for its corresponding child.
 	// The geners slice can be shorter than the children slice, if it has 3 children, then
 	// geners[gen1, gen2] will be regarded as geners[gen1, gen2, nil].
+	// This field is optional.
 	geners []dataGenerator
 }
 
@@ -368,7 +399,7 @@ func eType2FieldType(eType types.EvalType) *types.FieldType {
 	case types.ETDecimal:
 		return types.NewFieldType(mysql.TypeNewDecimal)
 	case types.ETDatetime, types.ETTimestamp:
-		return types.NewFieldType(mysql.TypeDate)
+		return types.NewFieldType(mysql.TypeDatetime)
 	case types.ETDuration:
 		return types.NewFieldType(mysql.TypeDuration)
 	case types.ETJson:
@@ -380,10 +411,13 @@ func eType2FieldType(eType types.EvalType) *types.FieldType {
 	}
 }
 
-func genVecExprBenchCase(ctx sessionctx.Context, funcName string, testCase vecExprBenchCase) (expr Expression, input *chunk.Chunk, output *chunk.Chunk) {
-	fts := make([]*types.FieldType, len(testCase.childrenTypes))
-	for i, eType := range testCase.childrenTypes {
-		fts[i] = eType2FieldType(eType)
+func genVecExprBenchCase(ctx sessionctx.Context, funcName string, testCase vecExprBenchCase) (expr Expression, fts []*types.FieldType, input *chunk.Chunk, output *chunk.Chunk) {
+	fts = testCase.childrenFieldTypes
+	if fts == nil {
+		fts = make([]*types.FieldType, len(testCase.childrenTypes))
+		for i, eType := range testCase.childrenTypes {
+			fts[i] = eType2FieldType(eType)
+		}
 	}
 	cols := make([]Expression, len(testCase.childrenTypes))
 	input = chunk.New(fts, 1024, 1024)
@@ -398,7 +432,7 @@
 	}
 
 	output = chunk.New([]*types.FieldType{eType2FieldType(testCase.retEvalType)}, 1024, 1024)
-	return expr, input, output
+	return expr, fts, input, output
 }
 
 // testVectorizedEvalOneVec is used to verify that the vectorized
@@ -407,7 +441,10 @@ func testVectorizedEvalOneVec(c *C, vecExprCases vecExprBenchCases) {
 	ctx := mock.NewContext()
 	for funcName, testCases := range vecExprCases {
 		for _, testCase := range testCases {
-			expr, input, output := genVecExprBenchCase(ctx, funcName, testCase)
+			expr, fts, input, output := genVecExprBenchCase(ctx, funcName, testCase)
+			commentf := func(row int) CommentInterface {
+				return Commentf("case %+v, row: %v, rowData: %v", testCase, row, input.GetRow(row).GetDatumRow(fts))
+			}
 			output2 := output.CopyConstruct()
 			c.Assert(evalOneVec(ctx, expr, input, output, 0), IsNil)
 			it := chunk.NewIterator4Chunk(input)
@@ -417,31 +454,52 @@ func testVectorizedEvalOneVec(c *C, vecExprCases vecExprBenchCases) {
 			switch testCase.retEvalType {
 			case types.ETInt:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetInt64(i) != c2.GetInt64(i)), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetInt64(i), Equals, c2.GetInt64(i), commentf(i))
+					}
 				}
 			case types.ETReal:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetFloat64(i) != c2.GetFloat64(i)), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetFloat64(i), Equals, c2.GetFloat64(i), commentf(i))
+					}
 				}
 			case types.ETDecimal:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetDecimal(i).Compare(c2.GetDecimal(i)) != 0), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetDecimal(i), DeepEquals, c2.GetDecimal(i), commentf(i))
+					}
 				}
 			case types.ETDatetime, types.ETTimestamp:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetTime(i).Compare(c2.GetTime(i)) != 0), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetTime(i), DeepEquals, c2.GetTime(i), commentf(i))
+					}
 				}
 			case types.ETDuration:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetDuration(i, 0) != c2.GetDuration(i, 0)), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetDuration(i, 0), Equals, c2.GetDuration(i, 0), commentf(i))
+					}
 				}
 			case types.ETJson:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetJSON(i).String() != c2.GetJSON(i).String()), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetJSON(i), DeepEquals, c2.GetJSON(i), commentf(i))
+					}
 				}
 			case types.ETString:
 				for i := 0; i < input.NumRows(); i++ {
-					c.Assert(c1.IsNull(i) != c2.IsNull(i) || (!c1.IsNull(i) && c1.GetString(i) != c2.GetString(i)), IsFalse)
+					c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i))
+					if !c1.IsNull(i) {
+						c.Assert(c1.GetString(i), Equals, c2.GetString(i), commentf(i))
+					}
 				}
 			}
 		}
@@ -454,7 +512,7 @@ func benchmarkVectorizedEvalOneVec(b *testing.B, vecExprCases vecExprBenchCases)
 	ctx := mock.NewContext()
 	for funcName, testCases := range vecExprCases {
 		for _, testCase := range testCases {
-			expr, input, output := genVecExprBenchCase(ctx, funcName, testCase)
+			expr, _, input, output := genVecExprBenchCase(ctx, funcName, testCase)
 			exprName := expr.String()
 			if sf, ok := expr.(*ScalarFunction); ok {
 				exprName = fmt.Sprintf("%v", reflect.TypeOf(sf.Function))
builtinFunc, fts []*types.FieldType, input *chunk.Chunk, result *chunk.Column) { childrenNumber := len(testCase.childrenTypes) - fts := make([]*types.FieldType, childrenNumber) - for i, eType := range testCase.childrenTypes { - fts[i] = eType2FieldType(eType) + fts = testCase.childrenFieldTypes + if fts == nil { + fts = make([]*types.FieldType, childrenNumber) + for i, eType := range testCase.childrenTypes { + fts[i] = eType2FieldType(eType) + } } cols := make([]Expression, childrenNumber) input = chunk.New(fts, 1024, 1024) @@ -524,7 +585,7 @@ func genVecBuiltinFuncBenchCase(ctx sessionctx.Context, funcName string, testCas panic(err) } result = chunk.NewColumn(eType2FieldType(testCase.retEvalType), 1024) - return baseFunc, input, result + return baseFunc, fts, input, result } // testVectorizedBuiltinFunc is used to verify that the vectorized @@ -539,7 +600,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { for funcName, testCases := range vecExprCases { for _, testCase := range testCases { ctx := mock.NewContext() - baseFunc, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) + baseFunc, fts, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) baseFuncName := fmt.Sprintf("%v", reflect.TypeOf(baseFunc)) tmp := strings.Split(baseFuncName, ".") baseFuncName = tmp[len(tmp)-1] @@ -547,7 +608,9 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { if !testAll && testFunc[baseFuncName] != true { continue } - + commentf := func(row int) CommentInterface { + return Commentf("case %+v, row: %v, rowData: %v", testCase, row, input.GetRow(row).GetDatumRow(fts)) + } it := chunk.NewIterator4Chunk(input) i := 0 var vecWarnCnt uint16 @@ -562,7 +625,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(val, Equals, i64s[i]) + c.Assert(val, Equals, i64s[i], commentf(i)) } i++ } @@ -576,7 +639,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(val, Equals, f64s[i]) + c.Assert(val, Equals, f64s[i], commentf(i)) } i++ } @@ -590,7 +653,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(*val, Equals, d64s[i]) + c.Assert(*val, Equals, d64s[i], commentf(i)) } i++ } @@ -604,7 +667,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(val, Equals, t64s[i]) + c.Assert(val, Equals, t64s[i], commentf(i)) } i++ } @@ -618,7 +681,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(val.Duration, Equals, d64s[i]) + c.Assert(val.Duration, Equals, d64s[i], commentf(i)) } i++ } @@ -633,7 +696,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { if !isNull { var cmp int cmp = json.CompareBinary(val, output.GetJSON(i)) - c.Assert(cmp, Equals, 0) + c.Assert(cmp, Equals, 0, commentf(i)) } i++ } @@ -646,7 +709,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { c.Assert(err, IsNil) c.Assert(isNull, Equals, output.IsNull(i)) if !isNull { - c.Assert(val, Equals, output.GetString(i)) + c.Assert(val, Equals, output.GetString(i), commentf(i)) } i++ } @@ -677,7 
+740,7 @@ func benchmarkVectorizedBuiltinFunc(b *testing.B, vecExprCases vecExprBenchCases } for funcName, testCases := range vecExprCases { for _, testCase := range testCases { - baseFunc, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) + baseFunc, _, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) baseFuncName := fmt.Sprintf("%v", reflect.TypeOf(baseFunc)) tmp := strings.Split(baseFuncName, ".") baseFuncName = tmp[len(tmp)-1] diff --git a/expression/builtin.go b/expression/builtin.go index c4bda95176292..6839b3d7241a6 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -533,7 +533,6 @@ var funcs = map[string]functionClass{ ast.Year: &yearFunctionClass{baseFunctionClass{ast.Year, 1, 1}}, ast.YearWeek: &yearWeekFunctionClass{baseFunctionClass{ast.YearWeek, 1, 2}}, ast.LastDay: &lastDayFunctionClass{baseFunctionClass{ast.LastDay, 1, 1}}, - ast.TiDBParseTso: &tidbParseTsoFunctionClass{baseFunctionClass{ast.TiDBParseTso, 1, 1}}, // string functions ast.ASCII: &asciiFunctionClass{baseFunctionClass{ast.ASCII, 1, 1}}, @@ -605,9 +604,6 @@ var funcs = map[string]functionClass{ ast.RowCount: &rowCountFunctionClass{baseFunctionClass{ast.RowCount, 0, 0}}, ast.SessionUser: &userFunctionClass{baseFunctionClass{ast.SessionUser, 0, 0}}, ast.SystemUser: &userFunctionClass{baseFunctionClass{ast.SystemUser, 0, 0}}, - // This function is used to show tidb-server version info. - ast.TiDBVersion: &tidbVersionFunctionClass{baseFunctionClass{ast.TiDBVersion, 0, 0}}, - ast.TiDBIsDDLOwner: &tidbIsDDLOwnerFunctionClass{baseFunctionClass{ast.TiDBIsDDLOwner, 0, 0}}, // control functions ast.If: &ifFunctionClass{baseFunctionClass{ast.If, 3, 3}}, @@ -719,4 +715,11 @@ var funcs = map[string]functionClass{ ast.JSONDepth: &jsonDepthFunctionClass{baseFunctionClass{ast.JSONDepth, 1, 1}}, ast.JSONKeys: &jsonKeysFunctionClass{baseFunctionClass{ast.JSONKeys, 1, 2}}, ast.JSONLength: &jsonLengthFunctionClass{baseFunctionClass{ast.JSONLength, 1, 2}}, + + // TiDB internal function. + ast.TiDBDecodeKey: &tidbDecodeKeyFunctionClass{baseFunctionClass{ast.TiDBDecodeKey, 1, 1}}, + // This function is used to show tidb-server version info. + ast.TiDBVersion: &tidbVersionFunctionClass{baseFunctionClass{ast.TiDBVersion, 0, 0}}, + ast.TiDBIsDDLOwner: &tidbIsDDLOwnerFunctionClass{baseFunctionClass{ast.TiDBIsDDLOwner, 0, 0}}, + ast.TiDBParseTso: &tidbParseTsoFunctionClass{baseFunctionClass{ast.TiDBParseTso, 1, 1}}, } diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go index 45e43e341c1da..4c16f1b6b2b96 100644 --- a/expression/builtin_cast.go +++ b/expression/builtin_cast.go @@ -1776,6 +1776,9 @@ func WrapWithCastAsDecimal(ctx sessionctx.Context, expr Expression) Expression { } tp := types.NewFieldType(mysql.TypeNewDecimal) tp.Flen, tp.Decimal = expr.GetType().Flen, expr.GetType().Decimal + if expr.GetType().EvalType() == types.ETInt { + tp.Flen = mysql.MaxIntWidth + } types.SetBinChsClnFlag(tp) tp.Flag |= expr.GetType().Flag & mysql.UnsignedFlag return BuildCastFunction(ctx, expr, tp) diff --git a/expression/builtin_cast_vec.go b/expression/builtin_cast_vec.go index 1308db703e32e..065f3ccec51bd 100644 --- a/expression/builtin_cast_vec.go +++ b/expression/builtin_cast_vec.go @@ -118,3 +118,28 @@ func (b *builtinCastIntAsRealSig) vecEvalReal(input *chunk.Chunk, result *chunk. 
func (b *builtinCastIntAsRealSig) vectorized() bool { return true } + +func (b *builtinCastRealAsRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + f64s := result.Float64s() + conditionUnionAndUnsigned := b.inUnion && mysql.HasUnsignedFlag(b.tp.Flag) + if !conditionUnionAndUnsigned { + return nil + } + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if f64s[i] < 0 { + f64s[i] = 0 + } + } + return nil +} + +func (b *builtinCastRealAsRealSig) vectorized() bool { + return true +} diff --git a/expression/builtin_cast_vec_test.go b/expression/builtin_cast_vec_test.go index 0093f2b0a24fd..c143724c10c82 100644 --- a/expression/builtin_cast_vec_test.go +++ b/expression/builtin_cast_vec_test.go @@ -23,9 +23,10 @@ import ( var vecBuiltinCastCases = map[string][]vecExprBenchCase{ ast.Cast: { - {types.ETInt, []types.EvalType{types.ETInt}, nil}, - {types.ETReal, []types.EvalType{types.ETInt}, nil}, - {types.ETDuration, []types.EvalType{types.ETInt}, []dataGenerator{new(randDurInt)}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETInt}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{new(randDurInt)}}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, } diff --git a/expression/builtin_compare_vec.go b/expression/builtin_compare_vec.go index 9ab360a822ca7..db0b0273f6f5b 100644 --- a/expression/builtin_compare_vec.go +++ b/expression/builtin_compare_vec.go @@ -90,3 +90,73 @@ func (b *builtinLeastDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chun func (b *builtinLeastDecimalSig) vectorized() bool { return true } + +func (b *builtinLeastIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + i64s := result.Int64s() + for j := 1; j < len(b.args); j++ { + if err := b.args[j].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + v := buf.GetInt64(i) + if v < i64s[i] { + i64s[i] = v + } + } + } + return nil +} + +func (b *builtinLeastIntSig) vectorized() bool { + return true +} + +func (b *builtinGreatestIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + i64s := result.Int64s() + for j := 1; j < len(b.args); j++ { + if err := b.args[j].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + v := buf.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if v[i] > i64s[i] { + i64s[i] = v[i] + } + } + } + return nil +} + +func (b *builtinGreatestIntSig) vectorized() bool { + return true +} diff --git a/expression/builtin_compare_vec_test.go b/expression/builtin_compare_vec_test.go index 52c83b37328e1..0469208a84080 100644 --- a/expression/builtin_compare_vec_test.go +++ b/expression/builtin_compare_vec_test.go @@ -23,10 +23,12 @@ import ( 
var vecBuiltinCompareCases = map[string][]vecExprBenchCase{ ast.Greatest: { - {types.ETDecimal, []types.EvalType{types.ETDecimal, types.ETDecimal, types.ETDecimal}, nil}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDecimal, types.ETDecimal, types.ETDecimal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt}}, }, ast.Least: { - {types.ETDecimal, []types.EvalType{types.ETDecimal, types.ETDecimal, types.ETDecimal}, nil}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDecimal, types.ETDecimal, types.ETDecimal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt}}, }, } diff --git a/expression/builtin_control_vec_generated_test.go b/expression/builtin_control_vec_generated_test.go index d01a8138bd2ce..a0ef740624482 100644 --- a/expression/builtin_control_vec_generated_test.go +++ b/expression/builtin_control_vec_generated_test.go @@ -26,19 +26,19 @@ import ( var vecBuiltinControlCases = map[string][]vecExprBenchCase{ ast.If: { - {types.ETInt, []types.EvalType{types.ETInt, types.ETInt, types.ETInt}, nil}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt}}, - {types.ETReal, []types.EvalType{types.ETInt, types.ETReal, types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETReal}}, - {types.ETDecimal, []types.EvalType{types.ETInt, types.ETDecimal, types.ETDecimal}, nil}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETDecimal}}, - {types.ETString, []types.EvalType{types.ETInt, types.ETString, types.ETString}, nil}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}}, - {types.ETDatetime, []types.EvalType{types.ETInt, types.ETDatetime, types.ETDatetime}, nil}, + {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETInt, types.ETDatetime, types.ETDatetime}}, - {types.ETDuration, []types.EvalType{types.ETInt, types.ETDuration, types.ETDuration}, nil}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETInt, types.ETDuration, types.ETDuration}}, - {types.ETJson, []types.EvalType{types.ETInt, types.ETJson, types.ETJson}, nil}, + {retEvalType: types.ETJson, childrenTypes: []types.EvalType{types.ETInt, types.ETJson, types.ETJson}}, }, } diff --git a/expression/builtin_info.go b/expression/builtin_info.go index 2e6ab91c6b5c5..427250d39039c 100644 --- a/expression/builtin_info.go +++ b/expression/builtin_info.go @@ -18,13 +18,18 @@ package expression import ( + "encoding/hex" + "fmt" "sort" + "strconv" "github.com/pingcap/errors" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/printer" ) @@ -44,6 +49,7 @@ var ( _ functionClass = &rowCountFunctionClass{} _ functionClass = &tidbVersionFunctionClass{} _ functionClass = &tidbIsDDLOwnerFunctionClass{} + _ functionClass = &tidbDecodeKeyFunctionClass{} ) var ( @@ -57,6 +63,7 @@ var ( _ builtinFunc = &builtinVersionSig{} _ builtinFunc = &builtinTiDBVersionSig{} _ builtinFunc = &builtinRowCountSig{} + _ builtinFunc = &builtinTiDBDecodeKeySig{} ) type databaseFunctionClass struct { @@ -589,3 +596,63 @@ func (b *builtinRowCountSig) evalInt(_ chunk.Row) (res 
int64, isNull bool, err e res = int64(b.ctx.GetSessionVars().StmtCtx.PrevAffectedRows) return res, false, nil } + +type tidbDecodeKeyFunctionClass struct { + baseFunctionClass +} + +func (c *tidbDecodeKeyFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) + sig := &builtinTiDBDecodeKeySig{bf} + return sig, nil +} + +type builtinTiDBDecodeKeySig struct { + baseBuiltinFunc +} + +func (b *builtinTiDBDecodeKeySig) Clone() builtinFunc { + newSig := &builtinTiDBDecodeKeySig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalString evals a builtinTiDBDecodeKeySig. +func (b *builtinTiDBDecodeKeySig) evalString(row chunk.Row) (string, bool, error) { + s, isNull, err := b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + return decodeKey(b.ctx, s), false, nil +} + +func decodeKey(ctx sessionctx.Context, s string) string { + key, err := hex.DecodeString(s) + if err != nil { + ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("invalid record/index key: %X", key)) + return s + } + // Auto-decode the key as bytes if needed. + _, bs, err := codec.DecodeBytes([]byte(key), nil) + if err == nil { + key = bs + } + // Try to decode it as a record key. + tableID, handle, err := tablecodec.DecodeRecordKey(key) + if err == nil { + return "tableID=" + strconv.FormatInt(tableID, 10) + ", _tidb_rowid=" + strconv.FormatInt(handle, 10) + } + // Try to decode it as a table index key. + tableID, indexID, indexValues, err := tablecodec.DecodeIndexKeyPrefix(key) + if err == nil { + idxValueStr := fmt.Sprintf("%X", indexValues) + return "tableID=" + strconv.FormatInt(tableID, 10) + ", indexID=" + strconv.FormatInt(indexID, 10) + ", indexValues=" + idxValueStr + } + + // TODO: try to decode other types of keys.
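decodeKey here is a fall-through cascade: hex-decode the input, try it as a record key, then as an index key, and finally (in the lines that follow) warn and return the raw string untouched. A minimal standalone sketch of the same cascade, using a hypothetical toy key layout (a 't' prefix, an 8-byte big-endian table ID, then "_r" plus an 8-byte row ID) rather than TiDB's real tablecodec encoding:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// decodeToyKey mirrors decodeKey's fallback cascade for a toy layout:
// 't' + 8-byte big-endian table ID + "_r" + 8-byte big-endian row ID.
// Anything that does not match is returned unchanged, like the TiDB helper.
func decodeToyKey(s string) string {
	key, err := hex.DecodeString(s)
	if err != nil {
		return s // not even valid hex: fall back to the raw input
	}
	// Try to decode it as a record key.
	if len(key) == 19 && key[0] == 't' && string(key[9:11]) == "_r" {
		tableID := binary.BigEndian.Uint64(key[1:9])
		rowID := binary.BigEndian.Uint64(key[11:19])
		return fmt.Sprintf("tableID=%d, _tidb_rowid=%d", tableID, rowID)
	}
	// Other layouts (index keys, and so on) would be tried here.
	return s
}

func main() {
	key := make([]byte, 19)
	key[0] = 't'
	binary.BigEndian.PutUint64(key[1:9], 43)
	copy(key[9:11], "_r")
	binary.BigEndian.PutUint64(key[11:19], 42451)
	fmt.Println(decodeToyKey(hex.EncodeToString(key))) // tableID=43, _tidb_rowid=42451
	fmt.Println(decodeToyKey("zz"))                    // zz (falls through untouched)
}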
+ ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("invalid record/index key: %X", key)) + return s +} diff --git a/expression/builtin_math_vec.go b/expression/builtin_math_vec.go index c9654e465f3de..6f7790d2ca08f 100644 --- a/expression/builtin_math_vec.go +++ b/expression/builtin_math_vec.go @@ -14,12 +14,36 @@ package expression import ( + "fmt" "math" + "strconv" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" ) +func (b *builtinLog1ArgSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + if f64s[i] <= 0 { + result.SetNull(i, true) + } else { + f64s[i] = math.Log(f64s[i]) + } + } + return nil +} + +func (b *builtinLog1ArgSig) vectorized() bool { + return true +} + func (b *builtinLog2Sig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { return err @@ -199,6 +223,113 @@ func (b *builtinCosSig) vectorized() bool { return true } +func (b *builtinCotSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + tan := math.Tan(f64s[i]) + if tan != 0 { + cot := 1 / tan + if !math.IsInf(cot, 0) && !math.IsNaN(cot) { + f64s[i] = cot + } + continue + } + if err := types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("cot(%s)", strconv.FormatFloat(f64s[i], 'f', -1, 64))); err != nil { + return err + } + } + return nil +} + +func (b *builtinCotSig) vectorized() bool { + return true +} + +func (b *builtinDegreesSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + f64s[i] = f64s[i] * 180 / math.Pi + } + return nil +} + +func (b *builtinDegreesSig) vectorized() bool { + return true +} + +func (b *builtinExpSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + exp := math.Exp(f64s[i]) + if math.IsInf(exp, 0) || math.IsNaN(exp) { + s := fmt.Sprintf("exp(%s)", b.args[0].String()) + if err := types.ErrOverflow.GenWithStackByArgs("DOUBLE", s); err != nil { + return err + } + } + f64s[i] = exp + } + return nil +} + +func (b *builtinExpSig) vectorized() bool { + return true +} + +func (b *builtinRadiansSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + f64s[i] = f64s[i] * math.Pi / 180 + } + return nil +} + +func (b *builtinRadiansSig) vectorized() bool { + return true +} + +func (b *builtinSinSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + if result.IsNull(i) { + continue + } + f64s[i] = math.Sin(f64s[i]) + } + return nil +} + 
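The unary math signatures above all share one shape: evaluate the child directly into the result column, then rewrite the Float64s slice in place, skipping any row whose null bit is set. A minimal standalone sketch of that pattern, with a plain slice plus a []bool null mask standing in for chunk.Column (hypothetical types, not TiDB's API):

package main

import (
	"fmt"
	"math"
)

// column is a toy stand-in for chunk.Column: a value slice plus a null mask.
type column struct {
	f64s  []float64
	nulls []bool
}

// vecRadians rewrites col in place the way builtinRadiansSig.vecEvalReal does:
// null rows are skipped, non-null rows are converted from degrees to radians.
func vecRadians(col *column) {
	for i := range col.f64s {
		if col.nulls[i] {
			continue
		}
		col.f64s[i] = col.f64s[i] * math.Pi / 180
	}
}

func main() {
	col := &column{
		f64s:  []float64{0, 90, 0, 180},
		nulls: []bool{false, false, true, false},
	}
	vecRadians(col)
	fmt.Println(col.f64s) // [0 1.5707963267948966 0 3.141592653589793]
}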
+func (b *builtinSinSig) vectorized() bool { + return true +} + func (b *builtinTanSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { return err @@ -263,3 +394,146 @@ func (b *builtinRoundDecSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Co func (b *builtinRoundDecSig) vectorized() bool { return true } + +func (b *builtinPowSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[0].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + if err := b.args[1].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + + x := buf1.Float64s() + y := result.Float64s() + result.MergeNulls(buf1) + f64s := result.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + power := math.Pow(x[i], y[i]) + if math.IsInf(power, -1) || math.IsInf(power, 1) || math.IsNaN(power) { + return types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("pow(%s, %s)", strconv.FormatFloat(x[i], 'f', -1, 64), strconv.FormatFloat(y[i], 'f', -1, 64))) + } + f64s[i] = power + } + return nil +} + +func (b *builtinPowSig) vectorized() bool { + return true +} + +func (b *builtinTruncateRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf1, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.MergeNulls(buf1) + x := result.Float64s() + d := buf1.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + x[i] = types.Truncate(x[i], int(d[i])) + } + return nil +} + +func (b *builtinTruncateRealSig) vectorized() bool { + return true +} + +func (b *builtinAbsRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := 0; i < len(f64s); i++ { + f64s[i] = math.Abs(f64s[i]) + } + return nil +} + +func (b *builtinAbsRealSig) vectorized() bool { + return true +} + +func (b *builtinAbsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + i64s := result.Int64s() + for i := 0; i < len(i64s); i++ { + if result.IsNull(i) { + continue + } + if i64s[i] == math.MinInt64 { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("abs(%d)", i64s[i])) + } + if i64s[i] < 0 { + i64s[i] = -i64s[i] + } + } + return nil +} + +func (b *builtinAbsIntSig) vectorized() bool { + return true +} + +func (b *builtinRoundIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + return b.args[0].VecEvalInt(b.ctx, input, result) +} + +func (b *builtinRoundIntSig) vectorized() bool { + return true +} + +func (b *builtinRoundWithFracIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + 
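The two-argument signatures (pow, truncate, and the round-with-fraction variant that continues below) follow a second shape: evaluate one argument into a scratch buffer from bufAllocator, evaluate the other into the result column, merge the null masks, and only then combine values row by row. A standalone sketch of that merge-then-combine step, again with toy column types:

package main

import "fmt"

// intColumn is a toy stand-in for an int64 chunk.Column.
type intColumn struct {
	i64s  []int64
	nulls []bool
}

// mergeNulls marks a result row null wherever buf is null, mirroring
// chunk.Column.MergeNulls.
func mergeNulls(result, buf *intColumn) {
	for i := range result.nulls {
		result.nulls[i] = result.nulls[i] || buf.nulls[i]
	}
}

// vecLeast2 combines two already-evaluated columns the way builtinLeastIntSig
// does: merge the null masks first, then take the row-wise minimum of the
// rows that are still non-null.
func vecLeast2(result, buf *intColumn) {
	mergeNulls(result, buf)
	for i, v := range buf.i64s {
		if result.nulls[i] {
			continue
		}
		if v < result.i64s[i] {
			result.i64s[i] = v
		}
	}
}

func main() {
	a := &intColumn{i64s: []int64{3, 7, 5}, nulls: []bool{false, false, false}}
	b := &intColumn{i64s: []int64{4, 2, 0}, nulls: []bool{false, true, false}}
	vecLeast2(a, b)
	fmt.Println(a.i64s, a.nulls) // [3 7 0] [false true false]
}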
+ i64s := result.Int64s() + frac := buf.Int64s() + result.MergeNulls(buf) + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + i64s[i] = int64(types.Round(float64(i64s[i]), int(frac[i]))) + } + return nil +} + +func (b *builtinRoundWithFracIntSig) vectorized() bool { + return true +} diff --git a/expression/builtin_math_vec_test.go b/expression/builtin_math_vec_test.go index 3febc6fe8ecff..7a5093e27ba7e 100644 --- a/expression/builtin_math_vec_test.go +++ b/expression/builtin_math_vec_test.go @@ -22,38 +22,66 @@ import ( ) var vecBuiltinMathCases = map[string][]vecExprBenchCase{ + ast.Log: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, ast.Log10: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Log2: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Sqrt: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Acos: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Asin: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Atan: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Atan2: { - {types.ETReal, []types.EvalType{types.ETReal, types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, }, ast.Cos: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, + ast.Exp: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, + ast.Degrees: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, + ast.Cot: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, + ast.Radians: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + }, + ast.Sin: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Tan: { - {types.ETReal, []types.EvalType{types.ETReal}, nil}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, }, ast.Abs: { - {types.ETDecimal, []types.EvalType{types.ETDecimal}, nil}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDecimal}}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, }, ast.Round: { - {types.ETDecimal, []types.EvalType{types.ETDecimal}, nil}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDecimal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, geners: []dataGenerator{nil, &rangeInt64Gener{-100, 100}}}, + }, + ast.Pow: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}, geners: []dataGenerator{&rangeRealGener{0, 10, 0.5}, &rangeRealGener{0, 100, 0.5}}}, + }, + ast.Truncate: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETInt}, geners: 
[]dataGenerator{nil, &rangeInt64Gener{-10, 10}}}, }, } diff --git a/expression/builtin_miscellaneous_vec_test.go b/expression/builtin_miscellaneous_vec_test.go index 7a1d93c7f4e4f..afe748a1c97f3 100644 --- a/expression/builtin_miscellaneous_vec_test.go +++ b/expression/builtin_miscellaneous_vec_test.go @@ -23,7 +23,7 @@ import ( var vecBuiltinMiscellaneousCases = map[string][]vecExprBenchCase{ ast.InetNtoa: { - {types.ETString, []types.EvalType{types.ETInt}, nil}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}}, }, } diff --git a/expression/builtin_string_vec.go b/expression/builtin_string_vec.go index eab0e82f1bc18..0f277f21b9ea5 100644 --- a/expression/builtin_string_vec.go +++ b/expression/builtin_string_vec.go @@ -18,6 +18,7 @@ import ( "strings" "unicode/utf8" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" ) @@ -164,3 +165,136 @@ Loop: func (b *builtinUpperSig) vectorized() bool { return true } + +func (b *builtinLeftSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + + buf2, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[1].VecEvalInt(b.ctx, input, buf2); err != nil { + return err + } + + result.ReserveString(n) + nums := buf2.Int64s() + for i := 0; i < n; i++ { + if buf.IsNull(i) || buf2.IsNull(i) { + result.AppendNull() + continue + } + + str := buf.GetString(i) + runes, leftLength := []rune(str), int(nums[i]) + if runeLength := len(runes); leftLength > runeLength { + leftLength = runeLength + } else if leftLength < 0 { + leftLength = 0 + } + + result.AppendString(string(runes[:leftLength])) + } + return nil +} + +func (b *builtinLeftSig) vectorized() bool { + return true +} + +func (b *builtinRightSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + + buf2, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[1].VecEvalInt(b.ctx, input, buf2); err != nil { + return err + } + + result.ReserveString(n) + nums := buf2.Int64s() + for i := 0; i < n; i++ { + if buf.IsNull(i) || buf2.IsNull(i) { + result.AppendNull() + continue + } + + str := buf.GetString(i) + runes := []rune(str) + strLength, rightLength := len(runes), int(nums[i]) + if rightLength > strLength { + rightLength = strLength + } else if rightLength < 0 { + rightLength = 0 + } + + result.AppendString(string(runes[strLength-rightLength:])) + } + return nil +} + +func (b *builtinRightSig) vectorized() bool { + return true +} + +// vecEvalString evals a builtinSpaceSig. 
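left and right above clamp the length argument into [0, number of runes] before slicing, so negative or oversized counts are safe on multi-byte strings; the space builtin whose doc comment continues below applies the same clamping idea to its repeat count. A standalone sketch of the rune-safe clamping:

package main

import "fmt"

// left returns the first n characters of s, counting runes rather than bytes,
// with n clamped into [0, rune count] exactly as builtinLeftSig clamps it.
func left(s string, n int) string {
	runes := []rune(s)
	if n > len(runes) {
		n = len(runes)
	} else if n < 0 {
		n = 0
	}
	return string(runes[:n])
}

func main() {
	fmt.Println(left("héllo", 2))  // hé (two runes, three bytes)
	fmt.Println(left("héllo", 99)) // héllo (clamped to the rune count)
	fmt.Println(left("héllo", -1)) // empty string (clamped to zero)
}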
+// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_space +func (b *builtinSpaceSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + result.ReserveString(n) + nums := buf.Int64s() + for i := 0; i < n; i++ { + if buf.IsNull(i) { + result.AppendNull() + continue + } + num := nums[i] + if num < 0 { + num = 0 + } + if uint64(num) > b.maxAllowedPacket { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errWarnAllowedPacketOverflowed.GenWithStackByArgs("space", b.maxAllowedPacket)) + result.AppendNull() + continue + } + if num > mysql.MaxBlobWidth { + result.AppendNull() + continue + } + result.AppendString(strings.Repeat(" ", int(num))) + } + return nil +} + +func (b *builtinSpaceSig) vectorized() bool { + return true +} diff --git a/expression/builtin_string_vec_test.go b/expression/builtin_string_vec_test.go index 38bd6c49bc96d..29d18e675d763 100644 --- a/expression/builtin_string_vec_test.go +++ b/expression/builtin_string_vec_test.go @@ -23,17 +23,27 @@ import ( var vecBuiltinStringCases = map[string][]vecExprBenchCase{ ast.Repeat: { - {types.ETString, []types.EvalType{types.ETString, types.ETInt}, []dataGenerator{&randLenStrGener{10, 20}, &rangeInt64Gener{-10, 10}}}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}, geners: []dataGenerator{&randLenStrGener{10, 20}, &rangeInt64Gener{-10, 10}}}, }, ast.Lower: { - {types.ETString, []types.EvalType{types.ETString}, nil}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}}, }, ast.IsNull: { - {types.ETInt, []types.EvalType{types.ETString}, []dataGenerator{&randLenStrGener{10, 20}}}, - {types.ETInt, []types.EvalType{types.ETString}, []dataGenerator{&defaultGener{0.2, types.ETString}}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randLenStrGener{10, 20}}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&defaultGener{0.2, types.ETString}}}, }, ast.Upper: { - {types.ETString, []types.EvalType{types.ETString}, nil}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}}, + }, + ast.Right: { + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}}, + }, + ast.Left: { + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}}, + }, + ast.Space: { + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{&rangeInt64Gener{-10, 2000}}}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{&rangeInt64Gener{5, 10}}}, }, } diff --git a/expression/builtin_time.go b/expression/builtin_time.go index 388372d76d76b..8e82d46408774 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -2168,7 +2168,7 @@ func (b *builtinTimeSig) evalDuration(row chunk.Row) (res types.Duration, isNull fsp = len(expr) - idx - 1 } - tmpFsp := int8(0) + var tmpFsp int8 if tmpFsp, err = types.CheckFsp(fsp); err != nil { return res, isNull, err } diff --git a/expression/builtin_time_vec.go b/expression/builtin_time_vec.go index 6409a06e84da7..297514ddbaa9a 100644 --- a/expression/builtin_time_vec.go +++ 
b/expression/builtin_time_vec.go @@ -58,6 +58,45 @@ func (b *builtinMonthSig) vectorized() bool { return true } +func (b *builtinYearSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETDatetime, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalTime(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf) + i64s := result.Int64s() + ds := buf.Times() + for i := 0; i < input.NumRows(); i++ { + if result.IsNull(i) { + continue + } + if ds[i].IsZero() { + if b.ctx.GetSessionVars().SQLMode.HasNoZeroDateMode() { + if err := handleInvalidTimeError(b.ctx, types.ErrIncorrectDatetimeValue.GenWithStackByArgs(ds[i].String())); err != nil { + return err + } + result.SetNull(i, true) + continue + } + i64s[i] = 0 + continue + } + i64s[i] = int64(ds[i].Time.Year()) + } + return nil +} + +func (b *builtinYearSig) vectorized() bool { + return true +} + func (b *builtinDateSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { if err := b.args[0].VecEvalTime(b.ctx, input, result); err != nil { return err diff --git a/expression/builtin_time_vec_test.go b/expression/builtin_time_vec_test.go index 8587649d7add9..fc58f65173fdb 100644 --- a/expression/builtin_time_vec_test.go +++ b/expression/builtin_time_vec_test.go @@ -26,10 +26,13 @@ import ( var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ ast.Month: { - {types.ETInt, []types.EvalType{types.ETDatetime}, nil}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETDatetime}}, + }, + ast.Year: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETDatetime}}, }, ast.Date: { - {types.ETDatetime, []types.EvalType{types.ETDatetime}, nil}, + {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETDatetime}}, }, } @@ -58,7 +61,7 @@ func (s *testEvaluatorSuite) TestVecMonth(c *C) { input.AppendNull(0) input.AppendTime(0, types.ZeroDate) - f, _, result := genVecBuiltinFuncBenchCase(ctx, ast.Month, vecExprBenchCase{types.ETInt, []types.EvalType{types.ETDatetime}, nil}) + f, _, _, result := genVecBuiltinFuncBenchCase(ctx, ast.Month, vecExprBenchCase{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETDatetime}}) c.Assert(ctx.GetSessionVars().StrictSQLMode, IsTrue) c.Assert(f.vecEvalInt(input, result), IsNil) c.Assert(len(ctx.GetSessionVars().StmtCtx.GetWarnings()), Equals, 2) diff --git a/expression/generator/control_vec.go b/expression/generator/control_vec.go index 73abcca75d2f1..1bf4b24181b12 100644 --- a/expression/generator/control_vec.go +++ b/expression/generator/control_vec.go @@ -147,7 +147,7 @@ import ( var vecBuiltinControlCases = map[string][]vecExprBenchCase{ ast.If: { {{ range . 
}} - {types.ET{{ .ETName }}, []types.EvalType{types.ETInt, types.ET{{ .ETName }}, types.ET{{ .ETName }}}, nil}, + {retEvalType: types.ET{{ .ETName }}, childrenTypes: []types.EvalType{types.ETInt, types.ET{{ .ETName }}, types.ET{{ .ETName }}}}, {{ end }} }, } diff --git a/expression/integration_test.go b/expression/integration_test.go index 04c530ba3ccb1..8d7f2be02788f 100755 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -4123,6 +4123,23 @@ func (s *testIntegrationSuite) testTiDBIsOwnerFunc(c *C) { result.Check(testkit.Rows(fmt.Sprintf("%v", ret))) } +func (s *testIntegrationSuite) TestTiDBInternalFunc(c *C) { + tk := testkit.NewTestKit(c, s.store) + defer s.cleanEnv(c) + result := tk.MustQuery("select tidb_decode_key( '74800000000000002B5F72800000000000A5D3' )") + result.Check(testkit.Rows("tableID=43, _tidb_rowid=42451")) + + result = tk.MustQuery("select tidb_decode_key( '74800000000000019B5F698000000000000001015257303100000000FB013736383232313130FF3900000000000000F8010000000000000000F7' )") + result.Check(testkit.Rows("tableID=411, indexID=1, indexValues=015257303100000000FB013736383232313130FF3900000000000000F8010000000000000000F7")) + + // Test invalid record/index key. + result = tk.MustQuery("select tidb_decode_key( '7480000000000000FF2E5F728000000011FFE1A3000000000000' )") + result.Check(testkit.Rows("7480000000000000FF2E5F728000000011FFE1A3000000000000")) + warns := tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(warns, HasLen, 1) + c.Assert(warns[0].Err.Error(), Equals, "invalid record/index key: 7480000000000000FF2E5F728000000011FFE1A3000000000000") +} + func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { store, err := mockstore.NewMockTikvStore() if err != nil { @@ -4922,6 +4939,14 @@ func (s *testIntegrationSuite) TestIssue11309And11319(c *C) { tk.MustQuery(`SELECT DATE_ADD('2007-03-28 22:08:28',INTERVAL 2.2 YEAR_MONTH)`).Check(testkit.Rows("2009-05-28 22:08:28")) } +func (s *testIntegrationSuite) TestIssue12301(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t (d decimal(19, 0), i bigint(11))") + tk.MustExec("insert into t values (123456789012, 123456789012)") + tk.MustQuery("select * from t where d = i").Check(testkit.Rows("123456789012 123456789012")) +} + func (s *testIntegrationSuite) TestNotExistFunc(c *C) { tk := testkit.NewTestKit(c, s.store) diff --git a/expression/typeinfer_test.go b/expression/typeinfer_test.go index b602bf01d54a5..424c3863852b3 100644 --- a/expression/typeinfer_test.go +++ b/expression/typeinfer_test.go @@ -694,7 +694,7 @@ func (s *testInferTypeSuite) createTestCase4ArithmeticFuncs() []typeInferTestCas {"c_int_d + c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, {"c_int_d + c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0}, {"c_int_d + c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, - {"c_int_d + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 17, 3}, + {"c_int_d + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_datetime + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_bigint_d + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_double_d + c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, 
types.UnspecifiedLength}, @@ -706,7 +706,7 @@ func (s *testInferTypeSuite) createTestCase4ArithmeticFuncs() []typeInferTestCas {"c_int_d - c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, {"c_int_d - c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0}, {"c_int_d - c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, - {"c_int_d - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 17, 3}, + {"c_int_d - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_datetime - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_bigint_d - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3}, {"c_double_d - c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, @@ -718,7 +718,7 @@ func (s *testInferTypeSuite) createTestCase4ArithmeticFuncs() []typeInferTestCas {"c_int_d * c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, {"c_int_d * c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0}, {"c_int_d * c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, - {"c_int_d * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 20, 3}, + {"c_int_d * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 29, 3}, {"c_datetime * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 31, 5}, {"c_bigint_d * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 29, 3}, {"c_double_d * c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength}, diff --git a/go.mod b/go.mod index 51f5e84f294b4..4132687f4202f 100644 --- a/go.mod +++ b/go.mod @@ -39,9 +39,9 @@ require ( github.com/pingcap/errors v0.11.4 github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e - github.com/pingcap/kvproto v0.0.0-20190904075355-9a1bd6a31da2 + github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1 github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd - github.com/pingcap/parser v0.0.0-20190912032624-978b8272c04e + github.com/pingcap/parser v0.0.0-20190923031704-33636bc5e5d6 github.com/pingcap/pd v0.0.0-20190712044914-75a1f9f3062b github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible github.com/pingcap/tipb v0.0.0-20190806070524-16909e03435e @@ -77,3 +77,5 @@ require ( sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 ) + +go 1.13 diff --git a/go.sum b/go.sum index 4969e9175359b..5379dd051dfff 100644 --- a/go.sum +++ b/go.sum @@ -161,13 +161,13 @@ github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20190516013202-4cf58ad90b6c/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= -github.com/pingcap/kvproto 
v0.0.0-20190904075355-9a1bd6a31da2 h1:wBORZD4gvEKK0tGP4g1Rv0Y7f2cNnObzI/ckPhsU11M= -github.com/pingcap/kvproto v0.0.0-20190904075355-9a1bd6a31da2/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= +github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1 h1:DNvxkdcjA0TBIIIF+K2w9KMlTzMZzLZ5JVF26kTCPhg= +github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= github.com/pingcap/log v0.0.0-20190214045112-b37da76f67a7/go.mod h1:xsfkWVaFVV5B8e1K9seWfyJWFrIhbtUTAD8NV1Pq3+w= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/parser v0.0.0-20190912032624-978b8272c04e h1:QeD1wC7bGElAhufSHH4JcIbs1cVdxnGWD3n3gcE5qeY= -github.com/pingcap/parser v0.0.0-20190912032624-978b8272c04e/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20190923031704-33636bc5e5d6 h1:PyjsTUD8gJ6QGilbwiy/TTn89J84/69Pj9LixOd/fFE= +github.com/pingcap/parser v0.0.0-20190923031704-33636bc5e5d6/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v0.0.0-20190712044914-75a1f9f3062b h1:oS9PftxQqgcRouKhhdaB52tXhVLEP7Ng3Qqsd6Z18iY= github.com/pingcap/pd v0.0.0-20190712044914-75a1f9f3062b/go.mod h1:3DlDlFT7EF64A1bmb/tulZb6wbPSagm5G4p1AlhaEDs= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= diff --git a/planner/core/cache.go b/planner/core/cache.go index cf324868e4718..af5c574395582 100644 --- a/planner/core/cache.go +++ b/planner/core/cache.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" @@ -134,3 +135,9 @@ func NewPSTMTPlanCacheValue(plan Plan, names []*types.FieldName) *PSTMTPlanCache OutPutNames: names, } } + +// CachedPrepareStmt stores the prepared AST from PrepareExec and other related fields. +type CachedPrepareStmt struct { + PreparedAst *ast.Prepared + VisitInfos []visitInfo +} diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 1ae6965873688..549c68ad14622 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/metrics" + "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" @@ -189,10 +190,15 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont if e.Name != "" { e.ExecID = vars.PreparedStmtNameToID[e.Name] } - prepared, ok := vars.PreparedStmts[e.ExecID] + preparedPointer, ok := vars.PreparedStmts[e.ExecID] if !ok { return errors.Trace(ErrStmtNotFound) } + preparedObj, ok := preparedPointer.(*CachedPrepareStmt) + if !ok { + return errors.Errorf("invalid CachedPrepareStmt type") + } + prepared := preparedObj.PreparedAst vars.StmtCtx.StmtType = prepared.StmtType paramLen := len(e.PrepareParams) @@ -239,7 +245,7 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont } prepared.SchemaVersion = is.SchemaMetaVersion() } - err := e.getPhysicalPlan(ctx, sctx, is, prepared) + err := e.getPhysicalPlan(ctx, sctx, is, preparedObj) if err != nil { return err } @@ -247,7 +253,19 @@ func (e 
*Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont return nil } -func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, prepared *ast.Prepared) error { +func (e *Execute) checkPreparedPriv(ctx context.Context, sctx sessionctx.Context, + preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error { + if pm := privilege.GetPrivilegeManager(sctx); pm != nil { + if err := CheckPrivilege(sctx.GetSessionVars().ActiveRoles, pm, preparedObj.VisitInfos); err != nil { + return err + } + } + err := CheckTableLock(sctx, is, preparedObj.VisitInfos) + return err +} + +func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, preparedStmt *CachedPrepareStmt) error { + prepared := preparedStmt.PreparedAst if prepared.CachedPlan != nil { // Rewriting the expression in the select.where condition will convert its // type from "paramMarker" to "Constant".When Point Select queries are executed, @@ -272,6 +290,9 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, if prepared.UseCache { cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) if cacheValue, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { + if err := e.checkPreparedPriv(ctx, sctx, preparedStmt, is); err != nil { + return err + } if metrics.ResettablePlanCacheCounterFortTest { metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() } else { @@ -579,52 +600,50 @@ type DDL struct { type Explain struct { baseSchemaProducer - StmtPlan Plan + TargetPlan Plan + Format string + Analyze bool + ExecStmt ast.StmtNode + Rows [][]string explainedPlans map[int]bool - Format string - Analyze bool - ExecStmt ast.StmtNode - ExecPlan Plan } // prepareSchema prepares explain's result schema. func (e *Explain) prepareSchema() error { - switch strings.ToLower(e.Format) { - case ast.ExplainFormatROW: - retFields := []string{"id", "count", "task", "operator info"} - if e.Analyze { - retFields = append(retFields, "execution info", "memory") - } - schema := expression.NewSchema(make([]*expression.Column, 0, len(retFields))...) - for _, fieldName := range retFields { - schema.Append(buildColumn("", fieldName, mysql.TypeString, mysql.MaxBlobWidth)) - } - e.SetSchema(schema) - case ast.ExplainFormatDOT: - retFields := []string{"dot contents"} - schema := expression.NewSchema(make([]*expression.Column, 0, len(retFields))...) - for _, fieldName := range retFields { - schema.Append(buildColumn("", fieldName, mysql.TypeString, mysql.MaxBlobWidth)) - } - e.SetSchema(schema) + var fieldNames []string + format := strings.ToLower(e.Format) + + switch { + case format == ast.ExplainFormatROW && !e.Analyze: + fieldNames = []string{"id", "count", "task", "operator info"} + case format == ast.ExplainFormatROW && e.Analyze: + fieldNames = []string{"id", "count", "task", "operator info", "execution info", "memory"} + case format == ast.ExplainFormatDOT: + fieldNames = []string{"dot contents"} default: return errors.Errorf("explain format '%s' is not supported now", e.Format) } + + schema := expression.NewSchema(make([]*expression.Column, 0, len(fieldNames))...) + for _, fieldName := range fieldNames { + schema.Append(buildColumn("", fieldName, mysql.TypeString, mysql.MaxBlobWidth)) + } + e.SetSchema(schema) return nil } // RenderResult renders the explain result as specified format. 
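The reworked prepareSchema derives the column list from one switch over (format, analyze) instead of duplicating the schema-building loop per format; RenderResult, which follows, then walks the chosen plan. A standalone sketch of that header selection, assuming ast.ExplainFormatROW and ast.ExplainFormatDOT are the strings "row" and "dot":

package main

import (
	"errors"
	"fmt"
	"strings"
)

// explainFieldNames mirrors the switch in Explain.prepareSchema: the result
// columns depend on the format and on whether this is EXPLAIN ANALYZE.
func explainFieldNames(format string, analyze bool) ([]string, error) {
	format = strings.ToLower(format)
	switch {
	case format == "row" && !analyze:
		return []string{"id", "count", "task", "operator info"}, nil
	case format == "row" && analyze:
		return []string{"id", "count", "task", "operator info", "execution info", "memory"}, nil
	case format == "dot":
		return []string{"dot contents"}, nil
	default:
		return nil, errors.New("explain format '" + format + "' is not supported now")
	}
}

func main() {
	names, _ := explainFieldNames("ROW", true)
	fmt.Println(names) // [id count task operator info execution info memory]
}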
func (e *Explain) RenderResult() error { - if e.StmtPlan == nil { + if e.TargetPlan == nil { return nil } switch strings.ToLower(e.Format) { case ast.ExplainFormatROW: e.explainedPlans = map[int]bool{} - e.explainPlanInRowFormat(e.StmtPlan.(PhysicalPlan), "root", "", true) + e.explainPlanInRowFormat(e.TargetPlan, "root", "", true) case ast.ExplainFormatDOT: - e.prepareDotInfo(e.StmtPlan.(PhysicalPlan)) + e.prepareDotInfo(e.TargetPlan.(PhysicalPlan)) default: return errors.Errorf("explain format '%s' is not supported now", e.Format) } @@ -632,46 +651,69 @@ func (e *Explain) RenderResult() error { } // explainPlanInRowFormat generates explain information for root-tasks. -func (e *Explain) explainPlanInRowFormat(p PhysicalPlan, taskType, indent string, isLastChild bool) { +func (e *Explain) explainPlanInRowFormat(p Plan, taskType, indent string, isLastChild bool) { e.prepareOperatorInfo(p, taskType, indent, isLastChild) e.explainedPlans[p.ID()] = true // For every child we create a new sub-tree rooted by it. childIndent := e.getIndent4Child(indent, isLastChild) - for i, child := range p.Children() { - if e.explainedPlans[child.ID()] { - continue + + if physPlan, ok := p.(PhysicalPlan); ok { + for i, child := range physPlan.Children() { + if e.explainedPlans[child.ID()] { + continue + } + e.explainPlanInRowFormat(child, taskType, childIndent, i == len(physPlan.Children())-1) } - e.explainPlanInRowFormat(child.(PhysicalPlan), taskType, childIndent, i == len(p.Children())-1) } - switch copPlan := p.(type) { + switch x := p.(type) { case *PhysicalTableReader: - e.explainPlanInRowFormat(copPlan.tablePlan, "cop", childIndent, true) + e.explainPlanInRowFormat(x.tablePlan, "cop", childIndent, true) case *PhysicalIndexReader: - e.explainPlanInRowFormat(copPlan.indexPlan, "cop", childIndent, true) + e.explainPlanInRowFormat(x.indexPlan, "cop", childIndent, true) case *PhysicalIndexLookUpReader: - e.explainPlanInRowFormat(copPlan.indexPlan, "cop", childIndent, false) - e.explainPlanInRowFormat(copPlan.tablePlan, "cop", childIndent, true) + e.explainPlanInRowFormat(x.indexPlan, "cop", childIndent, false) + e.explainPlanInRowFormat(x.tablePlan, "cop", childIndent, true) case *PhysicalIndexMergeReader: - for i := 0; i < len(copPlan.partialPlans); i++ { - if copPlan.tablePlan == nil && i == len(copPlan.partialPlans)-1 { - e.explainPlanInRowFormat(copPlan.partialPlans[i], "cop", childIndent, true) + for i := 0; i < len(x.partialPlans); i++ { + if x.tablePlan == nil && i == len(x.partialPlans)-1 { + e.explainPlanInRowFormat(x.partialPlans[i], "cop", childIndent, true) } else { - e.explainPlanInRowFormat(copPlan.partialPlans[i], "cop", childIndent, false) + e.explainPlanInRowFormat(x.partialPlans[i], "cop", childIndent, false) } } - if copPlan.tablePlan != nil { - e.explainPlanInRowFormat(copPlan.tablePlan, "cop", childIndent, true) + if x.tablePlan != nil { + e.explainPlanInRowFormat(x.tablePlan, "cop", childIndent, true) + } + case *Insert: + if x.SelectPlan != nil { + e.explainPlanInRowFormat(x.SelectPlan, "root", childIndent, true) + } + case *Update: + if x.SelectPlan != nil { + e.explainPlanInRowFormat(x.SelectPlan, "root", childIndent, true) + } + case *Delete: + if x.SelectPlan != nil { + e.explainPlanInRowFormat(x.SelectPlan, "root", childIndent, true) + } + case *Execute: + if x.Plan != nil { + e.explainPlanInRowFormat(x.Plan, "root", childIndent, true) + } } } // prepareOperatorInfo generates the following information for every plan: // operator id, task type, operator info, and the estimated row 
count. -func (e *Explain) prepareOperatorInfo(p PhysicalPlan, taskType string, indent string, isLastChild bool) { +func (e *Explain) prepareOperatorInfo(p Plan, taskType string, indent string, isLastChild bool) { operatorInfo := p.ExplainInfo() - count := string(strconv.AppendFloat([]byte{}, p.statsInfo().RowCount, 'f', 2, 64)) + + count := "N/A" + if si := p.statsInfo(); si != nil { + count = strconv.FormatFloat(si.RowCount, 'f', 2, 64) + } explainID := p.ExplainID().String() row := []string{e.prettyIdentifier(explainID, indent, isLastChild), count, taskType, operatorInfo} if e.Analyze { @@ -808,6 +850,15 @@ func (e *Explain) prepareTaskDot(p PhysicalPlan, taskTp string, buffer *bytes.Bu pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.indexPlan.ExplainID())) copTasks = append(copTasks, copPlan.tablePlan) copTasks = append(copTasks, copPlan.indexPlan) + case *PhysicalIndexMergeReader: + for i := 0; i < len(copPlan.partialPlans); i++ { + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.partialPlans[i].ExplainID())) + copTasks = append(copTasks, copPlan.partialPlans[i]) + } + if copPlan.tablePlan != nil { + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID())) + copTasks = append(copTasks, copPlan.tablePlan) + } } for _, child := range curPlan.Children() { fmt.Fprintf(buffer, "\"%s\" -> \"%s\"\n", curPlan.ExplainID(), child.ExplainID()) diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index c131f1270eced..fce669dfc5a1e 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -312,7 +312,6 @@ func (p *LogicalJoin) constructIndexJoin( compareFilters *ColWithCmpFuncManager, ) []PhysicalPlan { joinType := p.JoinType - outerSchema := p.children[outerIdx].Schema() var ( innerJoinKeys []*expression.Column outerJoinKeys []*expression.Column @@ -324,11 +323,6 @@ func (p *LogicalJoin) constructIndexJoin( innerJoinKeys = p.LeftJoinKeys outerJoinKeys = p.RightJoinKeys } - all, _ := prop.AllSameOrder() - // If the order by columns are not all from outer child, index join cannot promise the order. - if !prop.AllColsFromSchema(outerSchema) || !all { - return nil - } chReqProps := make([]*property.PhysicalProperty, 2) chReqProps[outerIdx] = &property.PhysicalProperty{TaskTp: property.RootTaskType, ExpectedCnt: math.MaxFloat64, Items: prop.Items} if prop.ExpectedCnt < p.stats.RowCount { @@ -425,8 +419,13 @@ func (p *LogicalJoin) constructIndexMergeJoin( // First of all, we'll check whether the inner child is DataSource. // Then, we will extract the join keys of p's equal conditions. Then check whether all of them are just the primary key // or match some part of on index. If so we will choose the best one and construct a index join. -func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, outerIdx int) []PhysicalPlan { +func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, outerIdx int) (joins []PhysicalPlan) { outerChild, innerChild := p.children[outerIdx], p.children[1-outerIdx] + all, _ := prop.AllSameOrder() + // If the order by columns are not all from outer child, index join cannot promise the order. 
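Hoisting this order check into getIndexJoinByOuterIdx means the guard that follows skips both the table-scan and index-scan inner builders with a single early return whenever the required ordering cannot be produced entirely by the outer child. A standalone sketch of that guard as a set-containment test (hypothetical names; the real check is prop.AllColsFromSchema combined with AllSameOrder):

package main

import "fmt"

// canKeepOrder reports whether an index join can satisfy a required ordering:
// every order-by column must belong to the outer child's schema, and all
// items must share one direction (the AllSameOrder part of the real check).
func canKeepOrder(orderCols []string, outerSchema map[string]bool, sameOrder bool) bool {
	if !sameOrder {
		return false
	}
	for _, col := range orderCols {
		if !outerSchema[col] {
			return false // an inner-side column: the index join cannot keep order
		}
	}
	return true
}

func main() {
	outer := map[string]bool{"a": true, "b": true}
	fmt.Println(canKeepOrder([]string{"a", "b"}, outer, true)) // true
	fmt.Println(canKeepOrder([]string{"a", "c"}, outer, true)) // false: c comes from the inner side
}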
+ if !prop.AllColsFromSchema(outerChild.Schema()) || !all { + return nil + } var ( innerJoinKeys []*expression.Column outerJoinKeys []*expression.Column @@ -454,6 +453,21 @@ func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, ou if outerChild.statsInfo().RowCount > 0 { avgInnerRowCnt = p.equalCondOutCnt / outerChild.statsInfo().RowCount } + joins = p.buildIndexJoinInner2TableScan(prop, ds, innerJoinKeys, outerJoinKeys, outerIdx, us, avgInnerRowCnt) + if joins != nil { + return + } + return p.buildIndexJoinInner2IndexScan(prop, ds, innerJoinKeys, outerJoinKeys, outerIdx, us, avgInnerRowCnt) +} + +// buildIndexJoinInner2TableScan builds a TableScan as the inner child for an +// IndexJoin if possible. +// If the inner side of an index join is a TableScan, only one tuple will be +// fetched from the inner side for every tuple from the outer side. This is +// guaranteed to be no worse than building an IndexScan as the inner child. +func (p *LogicalJoin) buildIndexJoinInner2TableScan( + prop *property.PhysicalProperty, ds *DataSource, innerJoinKeys, outerJoinKeys []*expression.Column, + outerIdx int, us *LogicalUnionScan, avgInnerRowCnt float64) (joins []PhysicalPlan) { var tblPath *accessPath for _, path := range ds.possibleAccessPaths { if path.isTablePath { @@ -461,31 +475,39 @@ func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, ou break } } - if pkCol := ds.getPKIsHandleCol(); pkCol != nil && tblPath != nil { - keyOff2IdxOff := make([]int, len(innerJoinKeys)) - pkMatched := false - for i, key := range innerJoinKeys { - if !key.Equal(nil, pkCol) { - keyOff2IdxOff[i] = -1 - continue - } - pkMatched = true - keyOff2IdxOff[i] = 0 - } - if pkMatched { - joins := make([]PhysicalPlan, 0, 2) - - innerTask := p.constructInnerTableScanTask(ds, pkCol, outerJoinKeys, us, false, avgInnerRowCnt) - joins = append(joins, p.constructIndexJoin(prop, outerIdx, innerTask, nil, keyOff2IdxOff, nil, nil)...) - // The index merge join's inner plan is different from index join, so we should consturct another inner plan - // for it. - innerTask2 := p.constructInnerTableScanTask(ds, pkCol, outerJoinKeys, us, true, avgInnerRowCnt) - joins = append(joins, p.constructIndexMergeJoin(prop, outerIdx, innerTask2, nil, keyOff2IdxOff, nil, nil)...) - // Since the primary key means one value corresponding to exact one row, this will always be a no worse one - // comparing to other index. - return joins + if tblPath == nil { + return nil + } + pkCol := ds.getPKIsHandleCol() + if pkCol == nil { + return nil + } + keyOff2IdxOff := make([]int, len(innerJoinKeys)) + pkMatched := false + for i, key := range innerJoinKeys { + if !key.Equal(nil, pkCol) { + keyOff2IdxOff[i] = -1 + continue } + pkMatched = true + keyOff2IdxOff[i] = 0 } + if !pkMatched { + return nil + } + joins = make([]PhysicalPlan, 0, 2) + innerTask := p.constructInnerTableScanTask(ds, pkCol, outerJoinKeys, us, false, avgInnerRowCnt) + joins = append(joins, p.constructIndexJoin(prop, outerIdx, innerTask, nil, keyOff2IdxOff, nil, nil)...) + // The index merge join's inner plan is different from index join, so we + // should construct another inner plan for it. + innerTask2 := p.constructInnerTableScanTask(ds, pkCol, outerJoinKeys, us, true, avgInnerRowCnt) + joins = append(joins, p.constructIndexMergeJoin(prop, outerIdx, innerTask2, nil, keyOff2IdxOff, nil, nil)...)
+ return joins +} + +func (p *LogicalJoin) buildIndexJoinInner2IndexScan( + prop *property.PhysicalProperty, ds *DataSource, innerJoinKeys, outerJoinKeys []*expression.Column, + outerIdx int, us *LogicalUnionScan, avgInnerRowCnt float64) (joins []PhysicalPlan) { helper := &indexJoinBuildHelper{join: p} for _, path := range ds.possibleAccessPaths { if path.isTablePath { @@ -499,28 +521,28 @@ func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, ou logutil.BgLogger().Warn("build index join failed", zap.Error(err)) } } - if helper.chosenPath != nil { - keyOff2IdxOff := make([]int, len(innerJoinKeys)) - for i := range keyOff2IdxOff { - keyOff2IdxOff[i] = -1 - } - for idxOff, keyOff := range helper.idxOff2KeyOff { - if keyOff != -1 { - keyOff2IdxOff[keyOff] = idxOff - } + if helper.chosenPath == nil { + return nil + } + keyOff2IdxOff := make([]int, len(innerJoinKeys)) + for i := range keyOff2IdxOff { + keyOff2IdxOff[i] = -1 + } + for idxOff, keyOff := range helper.idxOff2KeyOff { + if keyOff != -1 { + keyOff2IdxOff[keyOff] = idxOff } - joins := make([]PhysicalPlan, 0, 2) - rangeInfo := helper.buildRangeDecidedByInformation(helper.chosenPath.idxCols, outerJoinKeys) - innerTask := p.constructInnerIndexScanTask(ds, helper.chosenPath, helper.chosenRemained, outerJoinKeys, us, rangeInfo, false, avgInnerRowCnt) - - joins = append(joins, p.constructIndexJoin(prop, outerIdx, innerTask, helper.chosenRanges, keyOff2IdxOff, helper.chosenPath, helper.lastColManager)...) - // The index merge join's inner plan is different from index join, so we should consturct another inner plan - // for it. - innerTask2 := p.constructInnerIndexScanTask(ds, helper.chosenPath, helper.chosenRemained, outerJoinKeys, us, rangeInfo, true, avgInnerRowCnt) - joins = append(joins, p.constructIndexMergeJoin(prop, outerIdx, innerTask2, helper.chosenRanges, keyOff2IdxOff, helper.chosenPath, helper.lastColManager)...) - return joins } - return nil + joins = make([]PhysicalPlan, 0, 2) + rangeInfo := helper.buildRangeDecidedByInformation(helper.chosenPath.idxCols, outerJoinKeys) + innerTask := p.constructInnerIndexScanTask(ds, helper.chosenPath, helper.chosenRemained, outerJoinKeys, us, rangeInfo, false, avgInnerRowCnt) + + joins = append(joins, p.constructIndexJoin(prop, outerIdx, innerTask, helper.chosenRanges, keyOff2IdxOff, helper.chosenPath, helper.lastColManager)...) + // The index merge join's inner plan is different from index join, so we + // should construct another inner plan for it. + innerTask2 := p.constructInnerIndexScanTask(ds, helper.chosenPath, helper.chosenRemained, outerJoinKeys, us, rangeInfo, true, avgInnerRowCnt) + joins = append(joins, p.constructIndexMergeJoin(prop, outerIdx, innerTask2, helper.chosenRanges, keyOff2IdxOff, helper.chosenPath, helper.lastColManager)...) 
+ return joins } type indexJoinBuildHelper struct { @@ -1091,20 +1113,12 @@ func (p *LogicalJoin) tryToGetIndexJoin(prop *property.PhysicalProperty) (indexJ rhsCardinality := p.Children()[1].statsInfo().Count() leftJoins := p.getIndexJoinByOuterIdx(prop, 0) - if leftJoins != nil && leftOuter && !rightOuter { - return leftJoins, true - } - - rightJoins := p.getIndexJoinByOuterIdx(prop, 1) - if rightJoins != nil && rightOuter && !leftOuter { - return rightJoins, true - } - - if leftJoins != nil && lhsCardinality < rhsCardinality { + if leftJoins != nil && (leftOuter && !rightOuter || lhsCardinality < rhsCardinality) { return leftJoins, leftOuter } - if rightJoins != nil && rhsCardinality < lhsCardinality { + rightJoins := p.getIndexJoinByOuterIdx(prop, 1) + if rightJoins != nil && (rightOuter && !leftOuter || rhsCardinality < lhsCardinality) { return rightJoins, rightOuter } diff --git a/planner/core/explain.go b/planner/core/explain.go index 4fc57e35c8c41..e0d467902fbe6 100644 --- a/planner/core/explain.go +++ b/planner/core/explain.go @@ -141,6 +141,9 @@ func (p *PhysicalIndexReader) ExplainInfo() string { // ExplainInfo implements PhysicalPlan interface. func (p *PhysicalIndexLookUpReader) ExplainInfo() string { // The children can be inferred by the relation symbol. + if p.PushedLimit != nil { + return fmt.Sprintf("limit embedded(offset:%v, count:%v)", p.PushedLimit.Offset, p.PushedLimit.Count) + } return "" } diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 844b8552ed5a7..1ddd0dbc49429 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -17,11 +17,23 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" ) var _ = Suite(&testIntegrationSuite{}) type testIntegrationSuite struct { + testData testutil.TestData +} + +func (s *testIntegrationSuite) SetUpSuite(c *C) { + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "integration_suite") + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) } func (s *testIntegrationSuite) TestShowSubquery(c *C) { @@ -96,3 +108,32 @@ func (s *testIntegrationSuite) TestBitColErrorMessage(c *C) { tk.MustGetErrCode("create table bit_col_t (a bit(0))", mysql.ErrInvalidFieldSize) tk.MustGetErrCode("create table bit_col_t (a bit(65))", mysql.ErrTooBigDisplaywidth) } + +func (s *testIntegrationSuite) TestPushLimitDownIndexLookUpReader(c *C) { + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + + tk.MustExec("use test") + tk.MustExec("drop table if exists tbl") + tk.MustExec("create table tbl(a int, b int, c int, key idx_b_c(b,c))") + tk.MustExec("insert into tbl values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5)") + tk.MustExec("analyze table tbl") + var input []string + var output []struct { + SQL string + Plan []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } +} diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 693f1e0c982a7..ff1fa79aa3162 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ 
-89,6 +89,12 @@ type PhysicalIndexReader struct { OutputColumns []*expression.Column } +// PushedDownLimit is the limit operator pushed down into PhysicalIndexLookUpReader. +type PushedDownLimit struct { + Offset uint64 + Count uint64 +} + // PhysicalIndexLookUpReader is the index look up reader in tidb. It's used in case of double reading. type PhysicalIndexLookUpReader struct { physicalSchemaProducer @@ -101,6 +107,8 @@ type PhysicalIndexLookUpReader struct { tablePlan PhysicalPlan ExtraHandleCol *expression.Column + // PushedLimit is used to avoid unnecessary table scan tasks of IndexLookUpReader. + PushedLimit *PushedDownLimit } // PhysicalIndexMergeReader is the reader using multiple indexes in tidb. diff --git a/planner/core/plan.go b/planner/core/plan.go index 716ce33f1fb49..8e91a52c85689 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -34,10 +34,16 @@ import ( type Plan interface { // Get the schema. Schema() *expression.Schema + // Get the ID. ID() int + // Get the ID in explain statement. ExplainID() fmt.Stringer + + // ExplainInfo returns operator information to be explained. + ExplainInfo() string + // replaceExprColumns replaces all the column references in the plan's expression node. replaceExprColumns(replace map[string]*expression.Column) @@ -48,6 +54,7 @@ type Plan interface { // OutputNames returns the outputting names of each column. OutputNames() []*types.FieldName + SelectBlockOffset() int } @@ -134,9 +141,6 @@ type PhysicalPlan interface { // ToPB converts physical plan to tipb executor. ToPB(ctx sessionctx.Context) (*tipb.Executor, error) - // ExplainInfo returns operator information to be explained. - ExplainInfo() string - // getChildReqProps gets the required property by child index. GetChildReqProps(idx int) *property.PhysicalProperty @@ -284,6 +288,10 @@ func (p *basePlan) ExplainID() fmt.Stringer { }) } +func (p *basePlan) ExplainInfo() string { + return "N/A" +} + func (p *basePlan) SelectBlockOffset() int { return p.blockOffset } diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index cff2ade2795df..17a7325bdb4df 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -2352,30 +2352,14 @@ func (b *PlanBuilder) buildTrace(trace *ast.TraceStmt) (Plan, error) { } func (b *PlanBuilder) buildExplainPlan(targetPlan Plan, format string, analyze bool, execStmt ast.StmtNode) (Plan, error) { - pp, ok := targetPlan.(PhysicalPlan) - if !ok { - switch x := targetPlan.(type) { - case *Delete: - pp = x.SelectPlan - case *Update: - pp = x.SelectPlan - case *Insert: - if x.SelectPlan != nil { - pp = x.SelectPlan - } - } - if pp == nil { - return nil, ErrUnsupportedType.GenWithStackByArgs(targetPlan) - } + p := &Explain{ + TargetPlan: targetPlan, + Format: format, + Analyze: analyze, + ExecStmt: execStmt, } - - p := &Explain{StmtPlan: pp, Analyze: analyze, Format: format, ExecStmt: execStmt, ExecPlan: targetPlan} p.ctx = b.ctx - err := p.prepareSchema() - if err != nil { - return nil, err - } - return p, nil + return p, p.prepareSchema() } // buildExplainFor gets *last* (maybe running or finished) query plan from connection #connection id. diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 94a5e7992d89f..d16f6570909fe 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -38,6 +38,7 @@ import ( // This plan is much faster to build and to execute because it avoids the optimization and coprocessor cost.
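Before the point-get changes continue below, a note on the plan.go hunk above: ExplainInfo moves from the PhysicalPlan interface up to Plan, and basePlan supplies an "N/A" default, which is exactly why Insert/Update/Delete operators now appear in EXPLAIN output as "N/A" rows. A minimal sketch of that embedding trick, with illustrative types rather than the real planner ones:

package main

import "fmt"

// plan is a trimmed-down analogue of the Plan interface after this change:
// every plan, physical or not, can be asked for explain info.
type plan interface {
	ExplainInfo() string
}

// basePlan supplies the default that non-physical operators inherit,
// which is what renders as "N/A" in the EXPLAIN results above.
type basePlan struct{}

func (basePlan) ExplainInfo() string { return "N/A" }

// tableScan overrides the default, as physical operators do.
type tableScan struct{ basePlan }

func (tableScan) ExplainInfo() string { return "table:t, range:[-inf,+inf]" }

// insertPlan keeps the embedded default.
type insertPlan struct{ basePlan }

func main() {
	for _, p := range []plan{insertPlan{}, tableScan{}} {
		fmt.Println(p.ExplainInfo()) // "N/A", then the scan's own info
	}
}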
type PointGetPlan struct { basePlan + dbName string schema *expression.Schema TblInfo *model.TableInfo IndexInfo *model.IndexInfo @@ -299,10 +300,6 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP if tbl == nil { return nil } - dbName := tblName.Schema - if dbName.L == "" { - dbName = model.NewCIStr(ctx.GetSessionVars().CurrentDB) - } // Do not handle partitioned table. // Table partition implementation translates LogicalPlan from `DataSource` to // `Union -> DataSource` in the logical plan optimization pass, since PointGetPlan @@ -331,7 +328,11 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP if schema == nil { return nil } - p := newPointGetPlan(ctx, schema, tbl, names) + dbName := tblName.Schema.L + if dbName == "" { + dbName = ctx.GetSessionVars().CurrentDB + } + p := newPointGetPlan(ctx, dbName, schema, tbl, names) intDatum, err := handlePair.value.ConvertTo(ctx.GetSessionVars().StmtCtx, fieldType) if err != nil { if terror.ErrorEqual(types.ErrOverflow, err) { @@ -371,7 +372,11 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP if schema == nil { return nil } - p := newPointGetPlan(ctx, schema, tbl, names) + dbName := tblName.Schema.L + if dbName == "" { + dbName = ctx.GetSessionVars().CurrentDB + } + p := newPointGetPlan(ctx, dbName, schema, tbl, names) p.IndexInfo = idxInfo p.IndexValues = idxValues p.IndexValueParams = idxValueParams @@ -380,9 +385,10 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP return nil } -func newPointGetPlan(ctx sessionctx.Context, schema *expression.Schema, tbl *model.TableInfo, names []*types.FieldName) *PointGetPlan { +func newPointGetPlan(ctx sessionctx.Context, dbName string, schema *expression.Schema, tbl *model.TableInfo, names []*types.FieldName) *PointGetPlan { p := &PointGetPlan{ basePlan: newBasePlan(ctx, "Point_Get", 0), + dbName: dbName, schema: schema, TblInfo: tbl, outputNames: names, @@ -396,9 +402,8 @@ func checkFastPlanPrivilege(ctx sessionctx.Context, fastPlan *PointGetPlan, chec if pm == nil { return nil } - dbName := ctx.GetSessionVars().CurrentDB for _, checkType := range checkTypes { - if !pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, dbName, fastPlan.TblInfo.Name.L, "", checkType) { + if !pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, fastPlan.dbName, fastPlan.TblInfo.Name.L, "", checkType) { return errors.New("privilege check fail") } } diff --git a/planner/core/point_get_plan_test.go b/planner/core/point_get_plan_test.go index 031f8aeee614d..bff5118c56b6f 100644 --- a/planner/core/point_get_plan_test.go +++ b/planner/core/point_get_plan_test.go @@ -78,13 +78,16 @@ func (s *testPointGetSuite) TestPointGetPlanCache(c *C) { "Point_Get_1 1.00 root table:t, handle:1", )) tk.MustQuery("explain update t set b=b+1, c=c+1 where a = 1").Check(testkit.Rows( - "Point_Get_1 1.00 root table:t, handle:1", + "Update_2 N/A root N/A", + "└─Point_Get_1 1.00 root table:t, handle:1", )) tk.MustQuery("explain delete from t where a = 1").Check(testkit.Rows( - "Point_Get_1 1.00 root table:t, handle:1", + "Delete_2 N/A root N/A", + "└─Point_Get_1 1.00 root table:t, handle:1", )) tk.MustQuery("explain select a from t where a = -1").Check(testkit.Rows( - "TableDual_5 0.00 root rows:0")) + "TableDual_5 0.00 root rows:0", + )) tk.MustExec(`prepare stmt0 from "select a from t where a = ?"`) tk.MustExec("set @p0 = -1") tk.MustQuery("execute stmt0 using @p0").Check(testkit.Rows()) diff --git 
a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 36eeb38a30c9b..a03b3f87b1a8d 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -20,11 +20,14 @@ import ( "time" . "github.com/pingcap/check" + "github.com/pingcap/parser/auth" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" @@ -85,6 +88,48 @@ func (s *testPrepareSuite) TestPrepareCache(c *C) { tk.MustExec(`prepare stmt6 from "select distinct a from t order by a"`) tk.MustQuery("execute stmt6").Check(testkit.Rows("1", "2", "3", "4", "5", "6")) tk.MustQuery("execute stmt6").Check(testkit.Rows("1", "2", "3", "4", "5", "6")) + + // test privilege change + rootSe := tk.Se + tk.MustExec("drop table if exists tp") + tk.MustExec(`create table tp(c1 int, c2 int, primary key (c1))`) + tk.MustExec(`insert into tp values(1, 1), (2, 2), (3, 3)`) + + tk.MustExec(`create user 'u_tp'@'localhost'`) + tk.MustExec(`grant select on test.tp to u_tp@'localhost';flush privileges;`) + + // user u_tp + userSess := newSession(c, store, "test") + c.Assert(userSess.Auth(&auth.UserIdentity{Username: "u_tp", Hostname: "localhost"}, nil, nil), IsTrue) + mustExec(c, userSess, `prepare ps_stp_r from 'select * from tp where c1 > ?'`) + mustExec(c, userSess, `set @p2 = 2`) + tk.Se = userSess + tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3")) + tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3")) + tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3")) + + // root revoke + tk.Se = rootSe + tk.MustExec(`revoke all on test.tp from 'u_tp'@'localhost';flush privileges;`) + + // user u_tp + tk.Se = userSess + _, err = tk.Exec(`execute ps_stp_r using @p2`) + c.Assert(err, NotNil) + + // grant again + tk.Se = rootSe + tk.MustExec(`grant select on test.tp to u_tp@'localhost';flush privileges;`) + + // user u_tp + tk.Se = userSess + tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3")) + tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3")) + + // restore + tk.Se = rootSe + tk.MustExec("drop table if exists tp") + tk.MustExec(`DROP USER 'u_tp'@'localhost';`) } func (s *testPrepareSuite) TestPrepareCacheIndexScan(c *C) { @@ -370,3 +415,16 @@ func (s *testPrepareSuite) TestPrepareForGroupByItems(c *C) { tk.MustExec("set @a=2.0;") tk.MustQuery("execute s1 using @a;").Check(testkit.Rows("3")) } + +func newSession(c *C, store kv.Storage, dbName string) session.Session { + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + mustExec(c, se, "create database if not exists "+dbName) + mustExec(c, se, "use "+dbName) + return se +} + +func mustExec(c *C, se session.Session, sql string) { + _, err := se.Execute(context.Background(), sql) + c.Assert(err, IsNil) +} diff --git a/planner/core/task.go b/planner/core/task.go index 3074ece7d5e46..de998cdf6179f 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -567,6 +567,7 @@ func (t *rootTask) plan() PhysicalPlan { func (p *PhysicalLimit) attach2Task(tasks ...task) task { t := tasks[0].copy() + sunk := false if cop, ok := t.(*copTask); ok { // For double read which requires order being kept, the limit cannot be pushed down to the table side, // because handles would be 
reordered before being sent to table scan. @@ -581,9 +582,42 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { cop = attachPlan2Task(pushedDownLimit, cop).(*copTask) } t = finishCopTask(p.ctx, cop) + sunk = p.sinkIntoIndexLookUp(t) } - t = attachPlan2Task(p, t) - return t + if sunk { + return t + } + return attachPlan2Task(p, t) +} + +func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { + root := t.(*rootTask) + reader, isDoubleRead := root.p.(*PhysicalIndexLookUpReader) + proj, isProj := root.p.(*PhysicalProjection) + if !isDoubleRead && !isProj { + return false + } + if isProj { + reader, isDoubleRead = proj.Children()[0].(*PhysicalIndexLookUpReader) + if !isDoubleRead { + return false + } + } + // We can sink Limit into IndexLookUpReader only if tablePlan contains no Selection. + ts, isTableScan := reader.tablePlan.(*PhysicalTableScan) + if !isTableScan { + return false + } + reader.PushedLimit = &PushedDownLimit{ + Offset: p.Offset, + Count: p.Count, + } + ts.stats = p.stats + reader.stats = p.stats + if isProj { + proj.stats = p.stats + } + return true } // GetCost computes cost of TopN operator itself. diff --git a/planner/core/testdata/integration_suite_in.json b/planner/core/testdata/integration_suite_in.json new file mode 100644 index 0000000000000..9eb10afcb3d88 --- /dev/null +++ b/planner/core/testdata/integration_suite_in.json @@ -0,0 +1,15 @@ +[ + { + "name": "TestPushLimitDownIndexLookUpReader", + "cases": [ + // Limit should be pushed down into IndexLookUpReader, row count of IndexLookUpReader and TableScan should be 1.00. + "explain select * from tbl use index(idx_b_c) where b > 1 limit 2,1", + // Projection atop IndexLookUpReader, Limit should be pushed down into IndexLookUpReader, and Projection should have row count 1.00 as well. + "explain select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1", + // Limit should be pushed down into IndexLookUpReader when Selection on top of IndexScan. + "explain select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1", + // Limit should NOT be pushed down into IndexLookUpReader when Selection on top of TableScan. 
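The task.go hunk above is the heart of this change: once the cop task is finished, sinkIntoIndexLookUp embeds the Limit into the IndexLookUpReader (possibly beneath a Projection) as a PushedLimit, instead of leaving a root-level Limit operator; the reader can then stop scheduling table-lookup tasks after offset+count handles. The last case, directly after this aside, covers the shape that cannot be sunk. A rough, self-contained simulation of the effect; this is not the planner code, and the handle values are made up:

package main

import "fmt"

// pushedDownLimit mirrors the PushedDownLimit embedded into the reader.
type pushedDownLimit struct{ offset, count uint64 }

// lookUpRows simulates the double read: handles arrive from the index side
// and each fetched handle costs one table lookup. With an embedded limit the
// reader stops after offset+count handles; without one, every handle is
// fetched and a separate root Limit would trim the rows afterwards.
func lookUpRows(handles []int, pl *pushedDownLimit) (fetched int, rows []int) {
	for _, h := range handles {
		if pl != nil && uint64(fetched) >= pl.offset+pl.count {
			break
		}
		fetched++
		rows = append(rows, h) // one table fetch
	}
	if pl != nil { // apply offset/count to what was fetched
		lo := int(pl.offset)
		if lo > len(rows) {
			lo = len(rows)
		}
		hi := lo + int(pl.count)
		if hi > len(rows) {
			hi = len(rows)
		}
		rows = rows[lo:hi]
	}
	return fetched, rows
}

func main() {
	handles := []int{7, 3, 9, 4, 8} // think "where b > 1 limit 2,1"
	f1, r1 := lookUpRows(handles, nil)
	f2, r2 := lookUpRows(handles, &pushedDownLimit{offset: 2, count: 1})
	fmt.Println(f1, r1) // 5 table fetches; a root Limit would then keep one row
	fmt.Println(f2, r2) // 3 table fetches, and the same single row survives
}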
+ "explain select * from tbl use index(idx_b_c) where b > 1 and a > 1 limit 2,1" + ] + } +] diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json new file mode 100644 index 0000000000000..51e8c4001d067 --- /dev/null +++ b/planner/core/testdata/integration_suite_out.json @@ -0,0 +1,47 @@ +[ + { + "Name": "TestPushLimitDownIndexLookUpReader", + "Cases": [ + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 limit 2,1", + "Plan": [ + "IndexLookUp_14 1.00 root limit embedded(offset:2, count:1)", + "├─Limit_13 3.00 cop offset:0, count:3", + "│ └─IndexScan_11 3.00 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + "└─TableScan_12 1.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1", + "Plan": [ + "Projection_25 1.00 root Column#1, Column#2, Column#3", + "└─IndexLookUp_24 1.00 root limit embedded(offset:2, count:1)", + " ├─Limit_23 3.00 cop offset:0, count:3", + " │ └─IndexScan_21 3.00 cop table:tbl, index:b, c, range:(1,+inf], keep order:true, desc", + " └─TableScan_22 1.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1", + "Plan": [ + "IndexLookUp_15 1.00 root limit embedded(offset:2, count:1)", + "├─Limit_14 3.00 cop offset:0, count:3", + "│ └─Selection_13 3.00 cop gt(Column#3, 1)", + "│ └─IndexScan_11 3.75 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + "└─TableScan_12 1.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 and a > 1 limit 2,1", + "Plan": [ + "Limit_9 1.00 root offset:2, count:1", + "└─IndexLookUp_15 3.00 root ", + " ├─IndexScan_11 3.75 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + " └─Limit_14 3.00 cop offset:0, count:3", + " └─Selection_13 3.00 cop gt(Column#1, 1)", + " └─TableScan_12 3.75 cop table:tbl, keep order:false" + ] + } + ] + } +] diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 5b7bb38f5bde5..d1cda149dd289 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -141,7 +141,7 @@ }, { "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.d limit 1", - "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(Column#5, 1)])->Limit, Table(t))->Limit" + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(Column#5, 1)])->Limit, Table(t))" }, { "SQL": "select c from t where t.c = 1 and t.e = 1 order by t.d limit 1", @@ -177,7 +177,7 @@ }, { "SQL": "select c, b from t where c = 1 limit 1", - "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit->Projection" + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Projection" }, { "SQL": "select c, b from t where c = 1 and e = 1 and b = 1 limit 1", @@ -201,7 +201,7 @@ }, { "SQL": "select * from t where t.c = 1 and t.a > 1 order by t.d limit 1", - "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([gt(Column#1, 1)])->Limit, Table(t))->Limit" + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([gt(Column#1, 1)])->Limit, Table(t))" }, { "SQL": "select * from t use index(e_d_c_str_prefix) where t.c_str = 'abcdefghijk' and t.d_str = 'd' and t.e_str = 'e'", @@ -496,7 +496,7 @@ }, { "SQL": "select * from t where c = 1 order by c limit 1", - "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit" + 
"Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))" }, { "SQL": "select * from t order by a limit 1", diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index ac96ed9417eec..949c88273acf6 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -131,6 +131,24 @@ func (s *testPrivilegeSuite) TestCheckDBPrivilege(c *C) { c.Assert(pc.RequestVerification(activeRoles, "test", "", "", mysql.UpdatePriv), IsTrue) } +func (s *testPrivilegeSuite) TestCheckPointGetDBPrivilege(c *C) { + rootSe := newSession(c, s.store, s.dbName) + mustExec(c, rootSe, `CREATE USER 'tester'@'localhost';`) + mustExec(c, rootSe, `GRANT SELECT,UPDATE ON test.* TO 'tester'@'localhost';`) + mustExec(c, rootSe, `flush privileges;`) + mustExec(c, rootSe, `create database test2`) + mustExec(c, rootSe, `create table test2.t(id int, v int, primary key(id))`) + mustExec(c, rootSe, `insert into test2.t(id, v) values(1, 1)`) + + se := newSession(c, s.store, s.dbName) + c.Assert(se.Auth(&auth.UserIdentity{Username: "tester", Hostname: "localhost"}, nil, nil), IsTrue) + mustExec(c, se, `use test;`) + _, err := se.Execute(context.Background(), `select * from test2.t where id = 1`) + c.Assert(terror.ErrorEqual(err, core.ErrTableaccessDenied), IsTrue) + _, err = se.Execute(context.Background(), "update test2.t set v = 2 where id = 1") + c.Assert(terror.ErrorEqual(err, core.ErrTableaccessDenied), IsTrue) +} + func (s *testPrivilegeSuite) TestCheckTablePrivilege(c *C) { rootSe := newSession(c, s.store, s.dbName) mustExec(c, rootSe, `CREATE USER 'test1'@'localhost';`) diff --git a/server/conn_stmt.go b/server/conn_stmt.go index eea084db14829..39794d252e2b9 100644 --- a/server/conn_stmt.go +++ b/server/conn_stmt.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/parser/mysql" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/hack" @@ -624,8 +625,14 @@ func (cc *clientConn) handleSetOption(data []byte) (err error) { func (cc *clientConn) preparedStmt2String(stmtID uint32) string { sv := cc.ctx.GetSessionVars() - if prepared, ok := sv.PreparedStmts[stmtID]; ok { - return prepared.Stmt.Text() + sv.GetExecuteArgumentsInfo() + preparedPointer, ok := sv.PreparedStmts[stmtID] + if !ok { + return "prepared statement not found, ID: " + strconv.FormatUint(uint64(stmtID), 10) } - return "prepared statement not found, ID: " + strconv.FormatUint(uint64(stmtID), 10) + preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if !ok { + return "invalidate CachedPrepareStmt type, ID: " + strconv.FormatUint(uint64(stmtID), 10) + } + preparedAst := preparedObj.PreparedAst + return preparedAst.Stmt.Text() + sv.PreparedParams.String() } diff --git a/session/session.go b/session/session.go index cea22827f6928..3ce811ab673a9 100644 --- a/session/session.go +++ b/session/session.go @@ -266,16 +266,21 @@ func (s *session) cleanRetryInfo() { planCacheEnabled := plannercore.PreparedPlanCacheEnabled() var cacheKey kvcache.Key + var preparedAst *ast.Prepared if planCacheEnabled { firstStmtID := retryInfo.DroppedPreparedStmtIDs[0] - cacheKey = plannercore.NewPSTMTPlanCacheKey( - s.sessionVars, firstStmtID, s.sessionVars.PreparedStmts[firstStmtID].SchemaVersion, - ) + if preparedPointer, ok := s.sessionVars.PreparedStmts[firstStmtID]; ok { + preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if ok { + 
preparedAst = preparedObj.PreparedAst + cacheKey = plannercore.NewPSTMTPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion) + } + } } for i, stmtID := range retryInfo.DroppedPreparedStmtIDs { if planCacheEnabled { - if i > 0 { - plannercore.SetPstmtIDSchemaVersion(cacheKey, stmtID, s.sessionVars.PreparedStmts[stmtID].SchemaVersion) + if i > 0 && preparedAst != nil { + plannercore.SetPstmtIDSchemaVersion(cacheKey, stmtID, preparedAst.SchemaVersion) } s.PreparedPlanCache().Delete(cacheKey) } @@ -667,7 +672,7 @@ func (s *session) retry(ctx context.Context, maxCnt uint) (err error) { zap.Int64("schemaVersion", schemaVersion), zap.Uint("retryCnt", retryCnt), zap.Int("queryNum", i), - zap.String("sql", sqlForLog(st.OriginText())+sessVars.GetExecuteArgumentsInfo())) + zap.String("sql", sqlForLog(st.OriginText())+sessVars.PreparedParams.String())) } else { logutil.Logger(ctx).Warn("retrying", zap.Int64("schemaVersion", schemaVersion), @@ -1172,7 +1177,8 @@ func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields // CachedPlanExec short path currently ONLY for cached "point select plan" execution func (s *session) CachedPlanExec(ctx context.Context, - stmtID uint32, prepared *ast.Prepared, args []types.Datum) (sqlexec.RecordSet, error) { + stmtID uint32, prepareStmt *plannercore.CachedPrepareStmt, args []types.Datum) (sqlexec.RecordSet, error) { + prepared := prepareStmt.PreparedAst // compile ExecStmt is := executor.GetInfoSchema(s) execAst := &ast.ExecuteStmt{ExecID: stmtID} @@ -1205,7 +1211,8 @@ func (s *session) CachedPlanExec(ctx context.Context, // IsCachedExecOk checks if we can execute using plan cached in prepared structure // Be careful for the short path, current precondition is the cached plan satisfying // IsPointGetWithPKOrUniqueKeyByAutoCommit -func (s *session) IsCachedExecOk(ctx context.Context, prepared *ast.Prepared) (bool, error) { +func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt) (bool, error) { + prepared := preparedStmt.PreparedAst if prepared.CachedPlan == nil { return false, nil } @@ -1222,18 +1229,22 @@ func (s *session) IsCachedExecOk(ctx context.Context, prepared *ast.Prepared) (b func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args []types.Datum) (sqlexec.RecordSet, error) { var err error s.sessionVars.StartTime = time.Now() - prepared, ok := s.sessionVars.PreparedStmts[stmtID] + preparedPointer, ok := s.sessionVars.PreparedStmts[stmtID] if !ok { err = plannercore.ErrStmtNotFound logutil.Logger(ctx).Error("prepared statement not found", zap.Uint32("stmtID", stmtID)) return nil, err } - ok, err = s.IsCachedExecOk(ctx, prepared) + preparedStmt, ok := preparedPointer.(*plannercore.CachedPrepareStmt) + if !ok { + return nil, errors.Errorf("invalid CachedPrepareStmt type") + } + ok, err = s.IsCachedExecOk(ctx, preparedStmt) if err != nil { return nil, err } if ok { - return s.CachedPlanExec(ctx, stmtID, prepared, args) + return s.CachedPlanExec(ctx, stmtID, preparedStmt, args) } s.PrepareTxnCtx(ctx) st, err := executor.CompileExecutePreparedStmt(ctx, s, stmtID, args) @@ -1967,7 +1978,7 @@ func logQuery(query string, vars *variable.SessionVars) { zap.Int64("schemaVersion", vars.TxnCtx.SchemaVersion), zap.Uint64("txnStartTS", vars.TxnCtx.StartTS), zap.String("current_db", vars.CurrentDB), - zap.String("sql", query+vars.GetExecuteArgumentsInfo())) + zap.String("sql", query+vars.PreparedParams.String())) } } diff --git a/session/tidb.go b/session/tidb.go index
1b8643d88e77a..38c05d25b2288 100644 --- a/session/tidb.go +++ b/session/tidb.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" @@ -226,7 +227,8 @@ func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) if rs == nil { s.(*executor.ExecStmt).LogSlowQuery(origTxnCtx.StartTS, err == nil) s.(*executor.ExecStmt).SummaryStmt() - sessVars.PrevStmt = executor.FormatSQL(s.OriginText(), sessVars) + pps := types.CloneRow(sessVars.PreparedParams) + sessVars.PrevStmt = executor.FormatSQL(s.OriginText(), pps) } }() diff --git a/session/tidb_test.go b/session/tidb_test.go index 04aa6b84700fd..c5cb8cde111b5 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" @@ -168,3 +169,30 @@ func match(c *C, row []types.Datum, expected ...interface{}) { c.Assert(got, Equals, need) } } + +func (s *testMainSuite) TestKeysNeedLock(c *C) { + rowKey := tablecodec.EncodeRowKeyWithHandle(1, 1) + indexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) + uniqueValue := make([]byte, 8) + uniqueUntouched := append(uniqueValue, '1') + nonUniqueVal := []byte{'0'} + nonUniqueUntouched := []byte{'1'} + var deleteVal []byte + rowVal := []byte{'a', 'b', 'c'} + tests := []struct { + key []byte + val []byte + need bool + }{ + {rowKey, rowVal, true}, + {rowKey, deleteVal, true}, + {indexKey, nonUniqueVal, false}, + {indexKey, nonUniqueUntouched, false}, + {indexKey, uniqueValue, true}, + {indexKey, uniqueUntouched, false}, + {indexKey, deleteVal, false}, + } + for _, tt := range tests { + c.Assert(keyNeedToLock(tt.key, tt.val), Equals, tt.need) + } +} diff --git a/session/txn.go b/session/txn.go index 76d593e094ce0..b4eedb521079b 100755 --- a/session/txn.go +++ b/session/txn.go @@ -333,6 +333,9 @@ func keyNeedToLock(k, v []byte) bool { // only need to delete row key. return k[10] == 'r' } + if tablecodec.IsUntouchedIndexKValue(k, v) { + return false + } isNonUniqueIndex := len(v) == 1 // Put row key and unique index need to lock. return !isNonUniqueIndex diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 6fb291d6d57c7..c7ad86f29e5e0 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -202,12 +202,12 @@ type SessionVars struct { // systems variables, don't modify it directly, use GetSystemVar/SetSystemVar method. systems map[string]string // PreparedStmts stores prepared statement. - PreparedStmts map[uint32]*ast.Prepared + PreparedStmts map[uint32]interface{} PreparedStmtNameToID map[string]uint32 // preparedStmtID is id of prepared statement. preparedStmtID uint32 // PreparedParams params for prepared statements - PreparedParams []types.Datum + PreparedParams PreparedParams // ActiveRoles stores active roles for current user ActiveRoles []*auth.RoleIdentity @@ -407,7 +407,7 @@ type SessionVars struct { DurationCompile time.Duration // PrevStmt is used to store the previous executed statement in the current session. 
- PrevStmt string + PrevStmt fmt.Stringer // AllowRemoveAutoInc indicates whether a user can drop the auto_increment column attribute or not. AllowRemoveAutoInc bool @@ -424,6 +424,16 @@ type SessionVars struct { replicaRead kv.ReplicaReadType } +// PreparedParams contains the parameters of the current prepared statement when executing it. +type PreparedParams []types.Datum + +func (pps PreparedParams) String() string { + if len(pps) == 0 { + return "" + } + return " [arguments: " + types.DatumsToStrNoErr(pps) + "]" +} + // ConnectionInfo present connection used by audit. type ConnectionInfo struct { ConnectionID uint32 @@ -449,7 +459,7 @@ func NewSessionVars() *SessionVars { vars := &SessionVars{ Users: make(map[string]string), systems: make(map[string]string), - PreparedStmts: make(map[uint32]*ast.Prepared), + PreparedStmts: make(map[uint32]interface{}), PreparedStmtNameToID: make(map[string]uint32), PreparedParams: make([]types.Datum, 0, 10), TxnCtx: &TransactionContext{}, @@ -642,26 +652,6 @@ func (s *SessionVars) Location() *time.Location { return loc } -// GetExecuteArgumentsInfo gets the argument list as a string of execute statement. -func (s *SessionVars) GetExecuteArgumentsInfo() string { - if len(s.PreparedParams) == 0 { - return "" - } - args := make([]string, 0, len(s.PreparedParams)) - for _, v := range s.PreparedParams { - if v.IsNull() { - args = append(args, "") - } else { - str, err := v.ToString() - if err != nil { - terror.Log(err) - } - args = append(args, str) - } - } - return fmt.Sprintf(" [arguments: %s]", strings.Join(args, ", ")) -} - // GetSystemVar gets the string value of a system variable. func (s *SessionVars) GetSystemVar(name string) (string, bool) { val, ok := s.systems[name] @@ -683,7 +673,7 @@ func (s *SessionVars) setDDLReorgPriority(val string) { } // AddPreparedStmt adds prepareStmt to current session and count in global. -func (s *SessionVars) AddPreparedStmt(stmtID uint32, stmt *ast.Prepared) error { +func (s *SessionVars) AddPreparedStmt(stmtID uint32, stmt interface{}) error { if _, exists := s.PreparedStmts[stmtID]; !exists { valStr, _ := s.GetSystemVar(MaxPreparedStmtCount) maxPreparedStmtCount, err := strconv.ParseInt(valStr, 10, 64) diff --git a/store/mockstore/mocktikv/mock_tikv_test.go b/store/mockstore/mocktikv/mock_tikv_test.go index fa0d10cfe8923..9a354365c04f7 100644 --- a/store/mockstore/mocktikv/mock_tikv_test.go +++ b/store/mockstore/mocktikv/mock_tikv_test.go @@ -556,7 +556,7 @@ func (s *testMockTiKVSuite) TestRollbackAndWriteConflict(c *C) { s.mustPutOK(c, "test", "test2", 5, 8) // simulate `getTxnStatus` for txn 2. 
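Before the mocktikv changes continue, a note on the sessionctx hunks above: PreparedParams becomes a named slice type with its own String method (replacing GetExecuteArgumentsInfo), and PrevStmt becomes a fmt.Stringer, so argument formatting is deferred until a log line is actually emitted. A small sketch of the pattern; params here is a hypothetical stand-in, while the real type wraps []types.Datum:

package main

import (
	"fmt"
	"strings"
)

// params mimics the new PreparedParams type: a named slice satisfying
// fmt.Stringer, so call sites can write query+pp.String() or pass it to a
// logger as a lazily formatted value.
type params []interface{}

func (pp params) String() string {
	if len(pp) == 0 {
		return "" // no " [arguments: ...]" suffix for plain statements
	}
	strs := make([]string, 0, len(pp))
	for _, p := range pp {
		strs = append(strs, fmt.Sprint(p))
	}
	return " [arguments: " + strings.Join(strs, ", ") + "]"
}

func main() {
	fmt.Println("select * from t where id = ?" + params{42}.String())
	fmt.Println("select 1" + params{}.String())
}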
- err := s.store.Cleanup([]byte("test"), 2) + err := s.store.Cleanup([]byte("test"), 2, math.MaxUint64) c.Assert(err, IsNil) req = &kvrpcpb.PrewriteRequest{ Mutations: putMutations("test", "test3"), @@ -712,7 +712,7 @@ func (s *testMVCCLevelDB) TestTxnHeartBeat(c *C) { c.Assert(ttl, Greater, uint64(300)) // The lock has already been cleaned up - c.Assert(s.store.Cleanup([]byte("pk"), 5), IsNil) + c.Assert(s.store.Cleanup([]byte("pk"), 5, math.MaxUint64), IsNil) _, err = s.store.TxnHeartBeat([]byte("pk"), 5, 1000) c.Assert(err, NotNil) } diff --git a/store/mockstore/mocktikv/mvcc.go b/store/mockstore/mocktikv/mvcc.go index d9aa80fa4e433..be85563479903 100644 --- a/store/mockstore/mocktikv/mvcc.go +++ b/store/mockstore/mocktikv/mvcc.go @@ -259,7 +259,7 @@ type MVCCStore interface { Prewrite(req *kvrpcpb.PrewriteRequest) []error Commit(keys [][]byte, startTS, commitTS uint64) error Rollback(keys [][]byte, startTS uint64) error - Cleanup(key []byte, startTS uint64) error + Cleanup(key []byte, startTS, currentTS uint64) error ScanLock(startKey, endKey []byte, maxTS uint64) ([]*kvrpcpb.LockInfo, error) TxnHeartBeat(primaryKey []byte, startTS uint64, adviseTTL uint64) (uint64, error) ResolveLock(startKey, endKey []byte, startTS, commitTS uint64) error diff --git a/store/mockstore/mocktikv/mvcc_leveldb.go b/store/mockstore/mocktikv/mvcc_leveldb.go index cb97d8a874502..4afb1299f69e3 100644 --- a/store/mockstore/mocktikv/mvcc_leveldb.go +++ b/store/mockstore/mocktikv/mvcc_leveldb.go @@ -700,6 +700,12 @@ func prewriteMutation(db *leveldb.DB, batch *leveldb.Batch, } if ok { if dec.lock.startTS != startTS { + if isPessimisticLock { + // NOTE: A special handling. + // When pessimistic txn prewrite meets lock, setting the TTL = 0 means + // telling TiDB to roll back the transaction **unconditionally**. + dec.lock.ttl = 0 + } return dec.lock.lockErr(mutation.Key) } if dec.lock.op != kvrpcpb.Op_PessimisticLock { @@ -926,7 +932,8 @@ func getTxnCommitInfo(iter *Iterator, expectKey []byte, startTS uint64) (mvccVal } // Cleanup implements the MVCCStore interface. -func (mvcc *MVCCLevelDB) Cleanup(key []byte, startTS uint64) error { +// Cleanup API is deprecated, use CheckTxnStatus instead. +func (mvcc *MVCCLevelDB) Cleanup(key []byte, startTS, currentTS uint64) error { mvcc.mu.Lock() defer func() { mvcc.mu.Unlock() }() batch := &leveldb.Batch{} - err := rollbackKey(mvcc.db, batch, key, startTS) + startKey := mvccEncode(key, lockVer) + iter := newIterator(mvcc.db, &util.Range{ + Start: startKey, + }) + defer iter.Release() + + if iter.Valid() { + dec := lockDecoder{ + expectKey: key, + } + ok, err := dec.Decode(iter) + if err != nil { + return err + } + // If current transaction's lock exists. + if ok && dec.lock.startTS == startTS { + + // If the lock has already expired, clean it up. + if currentTS == 0 || uint64(oracle.ExtractPhysical(dec.lock.startTS))+dec.lock.ttl < uint64(oracle.ExtractPhysical(currentTS)) { + if err = rollbackLock(batch, dec.lock, key, startTS); err != nil { + return err + } + return mvcc.db.Write(batch, nil) + } + + // Otherwise, return a locked error with the TTL information. + return dec.lock.lockErr(key) + } + + // If current transaction's lock does not exist. + // If the commit information of the current transaction exists. + c, ok, err := getTxnCommitInfo(iter, key, startTS) + if err != nil { + return errors.Trace(err) + } + if ok { + // If the current transaction has already committed.
+ if c.valueType != typeRollback { + return ErrAlreadyCommitted(c.commitTS) + } + // If the current transaction has already been rolled back. + return nil + } + } + + // If the current transaction was not prewritten before. + value := mvccValue{ + valueType: typeRollback, + startTS: startTS, + commitTS: startTS, + } + writeKey := mvccEncode(key, startTS) + writeValue, err := value.MarshalBinary() if err != nil { return errors.Trace(err) } - return mvcc.db.Write(batch, nil) + batch.Put(writeKey, writeValue) + return nil } // CheckTxnStatus checks the primary lock of a transaction to decide its status. diff --git a/store/mockstore/mocktikv/rpc.go b/store/mockstore/mocktikv/rpc.go index c4b79af71d6a2..21f9acca10efc 100644 --- a/store/mockstore/mocktikv/rpc.go +++ b/store/mockstore/mocktikv/rpc.go @@ -357,7 +357,7 @@ func (h *rpcHandler) handleKvCleanup(req *kvrpcpb.CleanupRequest) *kvrpcpb.Clean panic("KvCleanup: key not in region") } var resp kvrpcpb.CleanupResponse - err := h.mvccStore.Cleanup(req.Key, req.GetStartVersion()) + err := h.mvccStore.Cleanup(req.Key, req.GetStartVersion(), req.GetCurrentTs()) if err != nil { if commitTS, ok := errors.Cause(err).(ErrAlreadyCommitted); ok { resp.CommitVersion = uint64(commitTS) diff --git a/store/tikv/client_batch.go b/store/tikv/client_batch.go index 9a3d137f1a7a9..6f4fcad407042 100644 --- a/store/tikv/client_batch.go +++ b/store/tikv/client_batch.go @@ -418,8 +418,10 @@ func (a *batchConn) batchSendLoop(cfg config.TiKVClient) { func (a *batchConn) getClientAndSend(entries []*batchCommandsEntry, requests []*tikvpb.BatchCommandsRequest_Request, requestIDs []uint64) { // Choose a connection by round-robin. - var cli *batchCommandsClient = nil - var target string = "" + var ( + cli *batchCommandsClient + target string + ) for i := 0; i < len(a.batchCommandsClients); i++ { a.index = (a.index + 1) % uint32(len(a.batchCommandsClients)) target = a.batchCommandsClients[a.index].target diff --git a/store/tikv/gcworker/gc_worker_test.go b/store/tikv/gcworker/gc_worker_test.go index 44081f702b98b..ba7e2534faca9 100644 --- a/store/tikv/gcworker/gc_worker_test.go +++ b/store/tikv/gcworker/gc_worker_test.go @@ -16,8 +16,6 @@ package gcworker import ( "bytes" "context" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv/oracle" "math" "sort" "strconv" @@ -30,15 +28,17 @@ import ( "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/mockoracle" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/mockstore/mocktikv" "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/store/tikv/tikvrpc" ) @@ -470,8 +470,10 @@ func (s *testGCWorkerSuite) testDeleteRangesFailureImpl(c *C, failType int) { sendReqCh := make(chan SentReq, 20) // The request sent to the specified key and store will fail.
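Stepping back to the rewritten MVCCLevelDB.Cleanup above: with a currentTS in hand it can distinguish a still-live lock (return ErrLocked carrying the TTL) from an expired one (roll it back), and when no lock or commit record exists it writes a rollback record to block a late prewrite. A condensed sketch of that decision tree; the types and the TTL arithmetic are toy versions (the real code compares physical times via oracle.ExtractPhysical):

package main

import "fmt"

// lockInfo and commitInfo are illustrative stand-ins for what Cleanup
// decodes from the store; they are not the mocktikv types.
type lockInfo struct {
	startTS uint64
	ttl     uint64 // toy model: the lock expires at startTS+ttl
}

type commitInfo struct {
	commitTS   uint64
	isRollback bool
}

// cleanup sketches the branch structure; currentTS == 0 preserves the old
// unconditional-rollback behavior.
func cleanup(lock *lockInfo, commit *commitInfo, startTS, currentTS uint64) string {
	if lock != nil && lock.startTS == startTS {
		if currentTS == 0 || startTS+lock.ttl < currentTS {
			return "lock expired: roll it back"
		}
		return "lock alive: return ErrLocked with its TTL"
	}
	if commit != nil {
		if !commit.isRollback {
			return fmt.Sprintf("already committed at %d", commit.commitTS)
		}
		return "already rolled back: nothing to do"
	}
	return "never prewritten: write a rollback record"
}

func main() {
	fmt.Println(cleanup(&lockInfo{5, 100}, nil, 5, 0))  // old behavior
	fmt.Println(cleanup(&lockInfo{5, 100}, nil, 5, 50)) // still locked
	fmt.Println(cleanup(nil, &commitInfo{9, false}, 5, 50))
	fmt.Println(cleanup(nil, nil, 5, 50))
}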
- var failKey []byte = nil - var failStore *metapb.Store = nil + var ( + failKey []byte + failStore *metapb.Store + ) s.client.unsafeDestroyRangeHandler = func(addr string, req *tikvrpc.Request) (*tikvrpc.Response, error) { sendReqCh <- SentReq{req, addr} resp := &tikvrpc.Response{ diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go index f440f93956870..263caab94cfd2 100644 --- a/store/tikv/lock_resolver.go +++ b/store/tikv/lock_resolver.go @@ -174,7 +174,8 @@ func (lr *LockResolver) getResolved(txnID uint64) (TxnStatus, bool) { return s, ok } -// BatchResolveLocks resolve locks in a batch +// BatchResolveLocks resolves locks in a batch. +// Use it in gcworker only! func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc RegionVerID) (bool, error) { if len(locks) == 0 { return true, nil @@ -182,7 +183,7 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi tikvLockResolverCountWithBatchResolve.Inc() - var expiredLocks []*Lock + expiredLocks := make([]*Lock, 0, len(locks)) for _, l := range locks { if lr.store.GetOracle().IsExpired(l.TxnID, l.TTL) { tikvLockResolverCountWithExpired.Inc() @@ -205,7 +206,7 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi continue } - status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary) + status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, 0) if err != nil { return false, errors.Trace(err) } @@ -266,9 +267,10 @@ // commit status. // 3) Send `ResolveLock` cmd to the lock's region to resolve all locks belonging to // the same transaction. -func (lr *LockResolver) ResolveLocks(bo *Backoffer, locks []*Lock) (msBeforeTxnExpired int64, err error) { +func (lr *LockResolver) ResolveLocks(bo *Backoffer, locks []*Lock) (int64, error) { + var msBeforeTxnExpired txnExpireTime if len(locks) == 0 { - return + return msBeforeTxnExpired.value(), nil } tikvLockResolverCountWithResolve.Inc() @@ -277,61 +279,111 @@ for _, l := range locks { msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, l.TTL) if msBeforeLockExpired <= 0 { - tikvLockResolverCountWithExpired.Inc() expiredLocks = append(expiredLocks, l) } else { - if msBeforeTxnExpired == 0 || msBeforeLockExpired < msBeforeTxnExpired { - msBeforeTxnExpired = msBeforeLockExpired - } + msBeforeTxnExpired.update(int64(l.TTL)) tikvLockResolverCountWithNotExpired.Inc() } } - if len(expiredLocks) == 0 { - if msBeforeTxnExpired > 0 { - tikvLockResolverCountWithWaitExpired.Inc() - } - return - } - // TxnID -> []Region, record resolved Regions. // TODO: Maybe put it in LockResolver and share by all txns. cleanTxns := make(map[uint64]map[RegionVerID]struct{}) for _, l := range expiredLocks { - var status TxnStatus - status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary) + status, err := lr.getTxnStatusFromLock(bo, l) if err != nil { - msBeforeTxnExpired = 0 + msBeforeTxnExpired.update(0) err = errors.Trace(err) - return + return msBeforeTxnExpired.value(), err } - cleanRegions, exists := cleanTxns[l.TxnID] - if !exists { - cleanRegions = make(map[RegionVerID]struct{}) - cleanTxns[l.TxnID] = cleanRegions - } + if status.ttl == 0 { + tikvLockResolverCountWithExpired.Inc() + // If the lock is committed or rolled back, resolve it.
+ cleanRegions, exists := cleanTxns[l.TxnID] + if !exists { + cleanRegions = make(map[RegionVerID]struct{}) + cleanTxns[l.TxnID] = cleanRegions + } - err = lr.resolveLock(bo, l, status, cleanRegions) - if err != nil { - msBeforeTxnExpired = 0 - err = errors.Trace(err) - return + err = lr.resolveLock(bo, l, status, cleanRegions) + if err != nil { + msBeforeTxnExpired.update(0) + err = errors.Trace(err) + return msBeforeTxnExpired.value(), err + } + } else { + tikvLockResolverCountWithNotExpired.Inc() + // If the lock is valid, the txn may be a pessimistic transaction. + // Update the txn expire time. + msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, status.ttl) + msBeforeTxnExpired.update(msBeforeLockExpired) } } + + if msBeforeTxnExpired.value() > 0 { + tikvLockResolverCountWithWaitExpired.Inc() + } + return msBeforeTxnExpired.value(), nil +} + +type txnExpireTime struct { + initialized bool + txnExpire int64 +} + +func (t *txnExpireTime) update(lockExpire int64) { + if lockExpire <= 0 { + lockExpire = 0 + } + if !t.initialized { + t.txnExpire = lockExpire + t.initialized = true + return + } + if lockExpire < t.txnExpire { + t.txnExpire = lockExpire + } return } +func (t *txnExpireTime) value() int64 { + if !t.initialized { + return 0 + } + return t.txnExpire +} + // GetTxnStatus queries tikv-server for a txn's status (commit/rollback). // If the primary key is still locked, it will launch a Rollback to abort it. // To avoid unnecessarily aborting too many txns, it is wiser to wait a few // seconds before calling it after Prewrite. func (lr *LockResolver) GetTxnStatus(txnID uint64, primary []byte) (TxnStatus, error) { + var status TxnStatus bo := NewBackoffer(context.Background(), cleanupMaxBackoff) - status, err := lr.getTxnStatus(bo, txnID, primary) - return status, errors.Trace(err) + currentTS, err := lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx) + if err != nil { + return status, err + } + return lr.getTxnStatus(bo, txnID, primary, currentTS) +} + +func (lr *LockResolver) getTxnStatusFromLock(bo *Backoffer, l *Lock) (TxnStatus, error) { + // NOTE: l.TTL = 0 is a special protocol!!! + // When the pessimistic txn prewrite meets locks of a txn, it should roll back that txn **unconditionally**. + // In this case, TiKV sets the lock TTL = 0, and TiDB uses currentTS = 0 to call + // getTxnStatus, and getTxnStatus with currentTS = 0 would roll back the transaction.
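The txnExpireTime helper introduced above is a minimum-tracker with an explicit initialized flag, so "no expire time observed yet" reads as 0 (treat the txn as expired) rather than as a real deadline; the l.TTL == 0 branch it serves continues just below. An equivalent compact form with a usage example (minExpire is this sketch's name, not the TiDB type):

package main

import "fmt"

// minExpire tracks the smallest expire time seen, clamping negatives to
// zero and reporting 0 until the first update, like txnExpireTime.
type minExpire struct {
	initialized bool
	v           int64
}

func (m *minExpire) update(lockExpire int64) {
	if lockExpire < 0 {
		lockExpire = 0
	}
	if !m.initialized || lockExpire < m.v {
		m.v = lockExpire
		m.initialized = true
	}
}

func (m *minExpire) value() int64 {
	if !m.initialized {
		return 0
	}
	return m.v
}

func main() {
	var m minExpire
	fmt.Println(m.value()) // 0: nothing observed yet
	m.update(300)
	m.update(120)
	fmt.Println(m.value()) // 120: earliest lock expiry wins
	m.update(-5)           // an already expired lock clamps to 0
	fmt.Println(m.value()) // 0
}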
+ if l.TTL == 0 { + return lr.getTxnStatus(bo, l.TxnID, l.Primary, 0) + } + + currentTS, err := lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx) + if err != nil { + return TxnStatus{}, err + } + return lr.getTxnStatus(bo, l.TxnID, l.Primary, currentTS) } -func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte) (TxnStatus, error) { +func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte, currentTS uint64) (TxnStatus, error) { if s, ok := lr.getResolved(txnID); ok { return s, nil } @@ -342,6 +394,7 @@ func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte req := tikvrpc.NewRequest(tikvrpc.CmdCleanup, &kvrpcpb.CleanupRequest{ Key: primary, StartVersion: txnID, + CurrentTs: currentTS, }) for { loc, err := lr.store.GetRegionCache().LocateKey(bo, primary) @@ -368,6 +421,12 @@ func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte } cmdResp := resp.Resp.(*kvrpcpb.CleanupResponse) if keyErr := cmdResp.GetError(); keyErr != nil { + // If the TTL of the primary lock is not outdated, the proto returns an ErrLocked that contains the TTL. + if lockInfo := keyErr.GetLocked(); lockInfo != nil { + status.ttl = lockInfo.LockTtl + status.commitTS = 0 + return status, nil + } err = errors.Errorf("unexpected cleanup err: %s, tid: %v", keyErr, txnID) logutil.BgLogger().Error("getTxnStatus error", zap.Error(err)) return status, err diff --git a/store/tikv/lock_test.go b/store/tikv/lock_test.go index e5bb7e063ae47..7fac316899b0f 100644 --- a/store/tikv/lock_test.go +++ b/store/tikv/lock_test.go @@ -200,6 +200,43 @@ func (s *testLockSuite) TestGetTxnStatus(c *C) { status, err = s.store.lockResolver.GetTxnStatus(startTS, []byte("a")) c.Assert(err, IsNil) c.Assert(status.IsCommitted(), IsFalse) + c.Assert(status.ttl, Greater, uint64(0)) +} + +func (s *testLockSuite) TestCheckTxnStatusTTL(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + s.prewriteTxn(c, txn.(*tikvTxn)) + + // Check the lock TTL of a transaction. + bo := NewBackoffer(context.Background(), prewriteMaxBackoff) + lr := newLockResolver(s.store) + status, err := lr.GetTxnStatus(txn.StartTS(), []byte("key")) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsFalse) + c.Assert(status.ttl, Greater, uint64(0)) + c.Assert(status.CommitTS(), Equals, uint64(0)) + + // Roll back the txn. + lock := s.mustGetLock(c, []byte("key")) + status = TxnStatus{} + cleanRegions := make(map[RegionVerID]struct{}) + err = newLockResolver(s.store).resolveLock(bo, lock, status, cleanRegions) + c.Assert(err, IsNil) + + // Check that its status is rolled back. + status, err = lr.GetTxnStatus(txn.StartTS(), []byte("key")) + c.Assert(err, IsNil) + c.Assert(status.ttl, Equals, uint64(0)) + c.Assert(status.commitTS, Equals, uint64(0)) + + // Check a committed txn. + startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) + status, err = lr.GetTxnStatus(startTS, []byte("a")) + c.Assert(err, IsNil) + c.Assert(status.ttl, Equals, uint64(0)) + c.Assert(status.commitTS, Equals, commitTS) } func (s *testLockSuite) TestTxnHeartBeat(c *C) { @@ -217,11 +254,11 @@ func (s *testLockSuite) TestTxnHeartBeat(c *C) { c.Assert(err, IsNil) c.Assert(newTTL, Equals, uint64(666)) - // The getTxnStatus API is confusing, it really means rollback!
- status, err := newLockResolver(s.store).getTxnStatus(bo, txn.StartTS(), []byte("key")) + lock := s.mustGetLock(c, []byte("key")) + status := TxnStatus{ttl: newTTL} + cleanRegions := make(map[RegionVerID]struct{}) + err = newLockResolver(s.store).resolveLock(bo, lock, status, cleanRegions) c.Assert(err, IsNil) - c.Assert(status.ttl, Equals, uint64(0)) - c.Assert(status.commitTS, Equals, uint64(0)) newTTL, err = sendTxnHeartBeat(bo, s.store, []byte("key"), txn.StartTS(), 666) c.Assert(err, NotNil) diff --git a/store/tikv/split_region.go b/store/tikv/split_region.go index 0050e69fb9d6b..204e3decab266 100644 --- a/store/tikv/split_region.go +++ b/store/tikv/split_region.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/stringutil" "go.uber.org/zap" ) @@ -154,7 +155,12 @@ func (s *tikvStore) batchSendSingleRegion(bo *Backoffer, batch batch, scatter bo logutil.BgLogger().Info("batch split regions complete", zap.Uint64("batch region ID", batch.regionID.id), zap.Stringer("first at", kv.Key(batch.keys[0])), - zap.Stringer("first new region left", logutil.Hex(spResp.Regions[0])), + zap.Stringer("first new region left", stringutil.MemoizeStr(func() string { + if len(spResp.Regions) == 0 { + return "" + } + return logutil.Hex(spResp.Regions[0]).String() + })), zap.Int("new region count", len(spResp.Regions))) if !scatter { diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go index 97cbea9c815dc..547e9c2092a4d 100644 --- a/tablecodec/tablecodec.go +++ b/tablecodec/tablecodec.go @@ -127,14 +127,10 @@ func DecodeRecordKey(key kv.Key) (tableID int64, handle int64, err error) { func DecodeIndexKey(key kv.Key) (tableID int64, indexID int64, indexValues []string, err error) { k := key - tableID, indexID, isRecord, err := DecodeKeyHead(key) + tableID, indexID, key, err = DecodeIndexKeyPrefix(key) if err != nil { return 0, 0, nil, errors.Trace(err) } - if isRecord { - return 0, 0, nil, errInvalidIndexKey.GenWithStack("invalid index key - %q", k) - } - key = key[prefixLen+idLen:] for len(key) > 0 { // FIXME: Without the schema information, we can only decode the raw kind of @@ -153,6 +149,22 @@ return } +// DecodeIndexKeyPrefix decodes the key and gets the tableID, indexID, indexValues. +func DecodeIndexKeyPrefix(key kv.Key) (tableID int64, indexID int64, indexValues []byte, err error) { + k := key + + tableID, indexID, isRecord, err := DecodeKeyHead(key) + if err != nil { + return 0, 0, nil, errors.Trace(err) + } + if isRecord { + return 0, 0, nil, errInvalidIndexKey.GenWithStack("invalid index key - %q", k) + } + indexValues = key[prefixLen+idLen:] + + return tableID, indexID, indexValues, nil +} + // DecodeKeyHead decodes the key's head and gets the tableID, indexID. isRecordKey is true when it is a record key. func DecodeKeyHead(key kv.Key) (tableID int64, indexID int64, isRecordKey bool, err error) { isRecordKey = false diff --git a/tools/check/check-gogenerate.sh b/tools/check/check-gogenerate.sh index f140dbcd998f5..aa0a682742ada 100755 --- a/tools/check/check-gogenerate.sh +++ b/tools/check/check-gogenerate.sh @@ -2,4 +2,8 @@ set -euo pipefail go generate ./... -git diff --quiet +if git status -s | awk '{print $2}' | xargs grep '^// Code generated .* DO NOT EDIT\.$' > /dev/null +then + echo "Your commit is changed after running go generate ./..., it should not happen."
+ exit 1 +fi diff --git a/tools/check/go.mod b/tools/check/go.mod index ca5d580f6d6a4..6dfc12cecadbd 100644 --- a/tools/check/go.mod +++ b/tools/check/go.mod @@ -20,3 +20,5 @@ require ( gopkg.in/yaml.v2 v2.2.2 // indirect honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 ) + +go 1.13 diff --git a/types/datum.go b/types/datum.go index 5e448d77652df..c5b47112bdec7 100644 --- a/types/datum.go +++ b/types/datum.go @@ -341,6 +341,50 @@ func (d *Datum) SetAutoID(id int64, flag uint) { } } +// String returns a human-readable description of Datum. It is intended only for debugging. +func (d Datum) String() string { + var t string + switch d.k { + case KindNull: + t = "KindNull" + case KindInt64: + t = "KindInt64" + case KindUint64: + t = "KindUint64" + case KindFloat32: + t = "KindFloat32" + case KindFloat64: + t = "KindFloat64" + case KindString: + t = "KindString" + case KindBytes: + t = "KindBytes" + case KindMysqlDecimal: + t = "KindMysqlDecimal" + case KindMysqlDuration: + t = "KindMysqlDuration" + case KindMysqlEnum: + t = "KindMysqlEnum" + case KindBinaryLiteral: + t = "KindBinaryLiteral" + case KindMysqlBit: + t = "KindMysqlBit" + case KindMysqlSet: + t = "KindMysqlSet" + case KindMysqlJSON: + t = "KindMysqlJSON" + case KindMysqlTime: + t = "KindMysqlTime" + default: + t = "Unknown" + } + v := d.GetValue() + if b, ok := v.([]byte); ok && d.k == KindBytes { + v = string(b) + } + return fmt.Sprintf("%v %v", t, v) +} + // GetValue gets the value of the datum of any kind. func (d *Datum) GetValue() interface{} { switch d.k { diff --git a/types/datum_test.go b/types/datum_test.go index 9b983ad496921..a5f0f30cf7f70 100644 --- a/types/datum_test.go +++ b/types/datum_test.go @@ -14,6 +14,7 @@ package types import ( + "fmt" "reflect" "testing" "time" @@ -47,6 +48,7 @@ func (ts *testDatumSuite) TestDatum(c *C) { d.SetCollation(d.Collation()) c.Assert(d.Collation(), NotNil) c.Assert(d.Length(), Equals, int(d.length)) + c.Assert(fmt.Sprint(d), Equals, d.String()) } } diff --git a/util/stringutil/string_util.go b/util/stringutil/string_util.go index f69fb03165e64..b6e472b5a40e4 100644 --- a/util/stringutil/string_util.go +++ b/util/stringutil/string_util.go @@ -252,17 +252,17 @@ func Copy(src string) string { return string(hack.Slice(src)) } -// stringerFunc defines string func implement fmt.Stringer. -type stringerFunc func() string +// StringerFunc defines a string func that implements fmt.Stringer. +type StringerFunc func() string // String implements fmt.Stringer -func (l stringerFunc) String() string { +func (l StringerFunc) String() string { return l() } // MemoizeStr returns memoized version of stringFunc. func MemoizeStr(l func() string) fmt.Stringer { - return stringerFunc(func() string { + return StringerFunc(func() string { return l() }) }
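One last note, on the stringutil hunk: exporting StringerFunc keeps the MemoizeStr pattern usable from other packages, and split_region.go now uses it both to defer formatting spResp.Regions[0] until the log entry is actually written and to guard against an empty Regions slice. A self-contained sketch of the pattern; the helpers below are local copies for illustration, not the stringutil ones:

package main

import "fmt"

// stringerFunc turns any func() string into a fmt.Stringer, so the work
// happens only if the value is really formatted.
type stringerFunc func() string

func (f stringerFunc) String() string { return f() }

// lazyStr mirrors the role of stringutil.MemoizeStr in this section.
func lazyStr(f func() string) fmt.Stringer { return stringerFunc(f) }

func main() {
	regions := [][]byte{} // imagine spResp.Regions
	first := lazyStr(func() string {
		if len(regions) == 0 {
			return "" // the nil-guard the split_region.go hunk adds
		}
		return fmt.Sprintf("%x", regions[0])
	})
	// With zap.Stringer, a log entry filtered out by level never calls
	// String(); printing it here forces the evaluation instead.
	fmt.Printf("first new region left: %q\n", first)
}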