diff --git a/ddl/table.go b/ddl/table.go index 264f396404d4a..a3962621e5c63 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -38,7 +38,6 @@ import ( "github.com/pingcap/tidb/tablecodec" tidb_util "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/gcutil" - "github.com/tikv/client-go/v2/tikv" ) const tiflashCheckTiDBHTTPAPIHalfInterval = 2500 * time.Millisecond @@ -330,7 +329,7 @@ func (w *worker) onRecoverTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in failpoint.Inject("mockRecoverTableCommitErr", func(val failpoint.Value) { if val.(bool) && atomic.CompareAndSwapUint32(&mockRecoverTableCommitErrOnce, 0, 1) { - tikv.MockCommitErrorEnable() + err = failpoint.Enable(`tikvclient/mockCommitErrorOpt`, "return(true)") } }) diff --git a/executor/adapter.go b/executor/adapter.go index bb10a5da7822c..16cd9302bc5f4 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -906,6 +906,8 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo } // Reset DurationParse due to the next statement may not need to be parsed (not a text protocol query). sessVars.DurationParse = 0 + // Clean the stale read flag when statement execution finish + sessVars.StmtCtx.IsStaleness = false } // CloseRecordSet will finish the execution of current statement and do some record work diff --git a/executor/analyze.go b/executor/analyze.go index e9a6b2268b33d..4b59913efe4ac 100644 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -778,7 +778,7 @@ func (e AnalyzeColumnsExec) decodeSampleDataWithVirtualColumn( totFts = append(totFts, col.RetType) } chk := chunk.NewChunkWithCapacity(totFts, len(collector.Samples)) - decoder := codec.NewDecoder(chk, e.ctx.GetSessionVars().TimeZone) + decoder := codec.NewDecoder(chk, e.ctx.GetSessionVars().Location()) for _, sample := range collector.Samples { for i := range sample.Columns { if schema.Columns[i].VirtualExpr != nil { diff --git a/executor/compiler.go b/executor/compiler.go index 8a9c21b277510..511b516a96dfc 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -15,8 +15,10 @@ package executor import ( "context" + "fmt" "github.com/opentracing/opentracing-go" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/config" @@ -63,6 +65,14 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm return nil, err } + failpoint.Inject("assertStmtCtxIsStaleness", func(val failpoint.Value) { + expected := val.(bool) + got := c.Ctx.GetSessionVars().StmtCtx.IsStaleness + if got != expected { + panic(fmt.Sprintf("stmtctx isStaleness wrong, expected:%v, got:%v", expected, got)) + } + }) + CountStmtNode(stmtNode, c.Ctx.GetSessionVars().InRestrictedSQL) var lowerPriority bool if c.Ctx.GetSessionVars().StmtCtx.Priority == mysql.NoPriority { diff --git a/executor/executor.go b/executor/executor.go index a3497b1b15331..472b3f6ab3e67 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -1662,6 +1662,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { DiskTracker: disk.NewTracker(memory.LabelForSQLText, -1), TaskID: stmtctx.AllocateTaskID(), CTEStorageMap: map[int]*CTEStorages{}, + IsStaleness: false, } sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker) globalConfig := config.GetGlobalConfig() diff --git a/executor/set_test.go b/executor/set_test.go index 526ade67e5ce8..4f2b79a988631 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -509,6 +509,16 @@ func (s *testSerialSuite1) 
TestSetVar(c *C) { tk.MustQuery(`show warnings`).Check(testkit.Rows()) tk.MustExec("set @@tidb_enable_clustered_index = 'int_only'") tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1287 'INT_ONLY' is deprecated and will be removed in a future release. Please use 'ON' or 'OFF' instead")) + + // test for tidb_enable_stable_result_mode + tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set global tidb_enable_stable_result_mode = 1`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) + tk.MustExec(`set global tidb_enable_stable_result_mode = 0`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set tidb_enable_stable_result_mode=1`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) } func (s *testSuite5) TestTruncateIncorrectIntSessionVar(c *C) { diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index e308803932ff2..694a73ec4707b 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -226,6 +226,8 @@ func (s *testStaleTxnSerialSuite) TestSelectAsOf(c *C) { c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) } } + failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleTSO") + failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleTSOWithTolerance") } func (s *testStaleTxnSerialSuite) TestStaleReadKVRequest(c *C) { @@ -996,3 +998,96 @@ func (s *testStaleTxnSerialSuite) TestStaleReadPrepare(c *C) { tk.MustExec(fmt.Sprintf(`set transaction read only as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000"))) c.Assert("execute p1", NotNil) } + +func (s *testStaleTxnSuite) TestStmtCtxStaleFlag(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + defer tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int)") + time.Sleep(2 * time.Second) + time1 := time.Now().Format("2006-1-2 15:04:05") + testcases := []struct { + sql string + hasStaleFlag bool + }{ + // assert select as of statement + { + sql: fmt.Sprintf("select * from t as of timestamp '%v'", time1), + hasStaleFlag: true, + }, + // assert select statement + { + sql: fmt.Sprintf("select * from t"), + hasStaleFlag: false, + }, + // assert select statement in stale transaction + { + sql: fmt.Sprintf("start transaction read only as of timestamp '%v'", time1), + hasStaleFlag: false, + }, + { + sql: fmt.Sprintf("select * from t"), + hasStaleFlag: true, + }, + { + sql: "commit", + hasStaleFlag: false, + }, + // assert select statement after set transaction + { + sql: fmt.Sprintf("set transaction read only as of timestamp '%v'", time1), + hasStaleFlag: false, + }, + { + sql: fmt.Sprintf("select * from t"), + hasStaleFlag: true, + }, + // assert select statement after consumed set transaction + { + sql: fmt.Sprintf("select * from t"), + hasStaleFlag: false, + }, + // assert prepare statement with select as of statement + { + sql: fmt.Sprintf(`prepare p from 'select * from t as of timestamp "%v"'`, time1), + hasStaleFlag: false, + }, + // assert execute statement with select as of statement + { + sql: "execute p", + hasStaleFlag: true, + }, + // assert prepare common select statement + { + sql: "prepare p1 from 'select * from t'", + hasStaleFlag: false, + }, + { + sql: "execute p1", + hasStaleFlag: false, + }, + // assert 
execute select statement in stale transaction + { + sql: fmt.Sprintf("start transaction read only as of timestamp '%v'", time1), + hasStaleFlag: false, + }, + { + sql: "execute p1", + hasStaleFlag: true, + }, + { + sql: "commit", + hasStaleFlag: false, + }, + } + + for _, testcase := range testcases { + failpoint.Enable("github.com/pingcap/tidb/executor/assertStmtCtxIsStaleness", + fmt.Sprintf("return(%v)", testcase.hasStaleFlag)) + tk.MustExec(testcase.sql) + failpoint.Disable("github.com/pingcap/tidb/executor/assertStmtCtxIsStaleness") + // assert stale read flag should be false after each statement execution + c.Assert(tk.Se.GetSessionVars().StmtCtx.IsStaleness, IsFalse) + } +} diff --git a/executor/tiflash_test.go b/executor/tiflash_test.go index e06237eefdc39..cca6bbe4841e5 100644 --- a/executor/tiflash_test.go +++ b/executor/tiflash_test.go @@ -14,6 +14,7 @@ package executor_test import ( + "bytes" "fmt" "math/rand" "strings" @@ -848,3 +849,38 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableBroadcastJoin(c *C) { } } } + +func (s *tiflashTestSuite) TestForbidTiflashDuringStaleRead(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a bigint(20))") + tk.MustExec("alter table t set tiflash replica 1") + tb := testGetTableByName(c, tk.Se, "test", "t") + err := domain.GetDomain(tk.Se).DDL().UpdateTableReplicaInfo(tk.Se, tb.Meta().ID, true) + c.Assert(err, IsNil) + time.Sleep(2 * time.Second) + tk.MustExec("insert into t values (9223372036854775807)") + tk.MustExec("insert into t values (9223372036854775807)") + tk.MustExec("insert into t values (9223372036854775807)") + tk.MustExec("insert into t values (9223372036854775807)") + tk.MustExec("insert into t values (9223372036854775807)") + tk.MustExec("insert into t values (9223372036854775807)") + rows := tk.MustQuery("explain select avg(a) from t").Rows() + resBuff := bytes.NewBufferString("") + for _, row := range rows { + fmt.Fprintf(resBuff, "%s\n", row) + } + res := resBuff.String() + c.Assert(strings.Contains(res, "tiflash"), IsTrue) + c.Assert(strings.Contains(res, "tikv"), IsFalse) + tk.MustExec("set transaction read only as of timestamp now(1)") + rows = tk.MustQuery("explain select avg(a) from t").Rows() + resBuff = bytes.NewBufferString("") + for _, row := range rows { + fmt.Fprintf(resBuff, "%s\n", row) + } + res = resBuff.String() + c.Assert(strings.Contains(res, "tiflash"), IsFalse) + c.Assert(strings.Contains(res, "tikv"), IsTrue) +} diff --git a/expression/builtin_json.go b/expression/builtin_json.go index 287e4383bf165..0690f334b45b9 100644 --- a/expression/builtin_json.go +++ b/expression/builtin_json.go @@ -216,11 +216,14 @@ func (c *jsonUnquoteFunctionClass) getFunction(ctx sessionctx.Context, args []Ex return sig, nil } -func (b *builtinJSONUnquoteSig) evalString(row chunk.Row) (string, bool, error) { - str, isNull, err := b.args[0].EvalString(b.ctx, row) +func (b *builtinJSONUnquoteSig) evalString(row chunk.Row) (str string, isNull bool, err error) { + str, isNull, err = b.args[0].EvalString(b.ctx, row) if isNull || err != nil { return "", isNull, err } + if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' && !goJSON.Valid([]byte(str)) { + return "", false, json.ErrInvalidJSONText.GenWithStackByArgs("The document root must not be followed by other values.") + } str, err = json.UnquoteString(str) if err != nil { return "", false, err diff --git a/expression/builtin_json_test.go b/expression/builtin_json_test.go index
666c1a1eb5b00..4cb744bb47969 100644 --- a/expression/builtin_json_test.go +++ b/expression/builtin_json_test.go @@ -16,6 +16,7 @@ package expression import ( . "github.com/pingcap/check" "github.com/pingcap/parser/ast" + "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" @@ -78,31 +79,38 @@ func (s *testEvaluatorSuite) TestJSONQuote(c *C) { func (s *testEvaluatorSuite) TestJSONUnquote(c *C) { fc := funcs[ast.JSONUnquote] tbl := []struct { - Input interface{} - Expected interface{} + Input string + Result string + Error error }{ - {nil, nil}, - {``, ``}, - {`""`, ``}, - {`''`, `''`}, - {`"a"`, `a`}, - {`3`, `3`}, - {`{"a": "b"}`, `{"a": "b"}`}, - {`{"a": "b"}`, `{"a": "b"}`}, - {`"hello,\"quoted string\",world"`, `hello,"quoted string",world`}, - {`"hello,\"宽字符\",world"`, `hello,"宽字符",world`}, - {`Invalid Json string\tis OK`, `Invalid Json string\tis OK`}, - {`"1\\u2232\\u22322"`, `1\u2232\u22322`}, - {`"[{\"x\":\"{\\\"y\\\":12}\"}]"`, `[{"x":"{\"y\":12}"}]`}, - {`[{\"x\":\"{\\\"y\\\":12}\"}]`, `[{\"x\":\"{\\\"y\\\":12}\"}]`}, + {``, ``, nil}, + {`""`, ``, nil}, + {`''`, `''`, nil}, + {`3`, `3`, nil}, + {`{"a": "b"}`, `{"a": "b"}`, nil}, + {`{"a": "b"}`, `{"a": "b"}`, nil}, + {`"hello,\"quoted string\",world"`, `hello,"quoted string",world`, nil}, + {`"hello,\"宽字符\",world"`, `hello,"宽字符",world`, nil}, + {`Invalid Json string\tis OK`, `Invalid Json string\tis OK`, nil}, + {`"1\\u2232\\u22322"`, `1\u2232\u22322`, nil}, + {`"[{\"x\":\"{\\\"y\\\":12}\"}]"`, `[{"x":"{\"y\":12}"}]`, nil}, + {`[{\"x\":\"{\\\"y\\\":12}\"}]`, `[{\"x\":\"{\\\"y\\\":12}\"}]`, nil}, + {`"a"`, `a`, nil}, + {`""a""`, `""a""`, json.ErrInvalidJSONText.GenWithStackByArgs("The document root must not be followed by other values.")}, + {`"""a"""`, `"""a"""`, json.ErrInvalidJSONText.GenWithStackByArgs("The document root must not be followed by other values.")}, } - dtbl := tblToDtbl(tbl) - for _, t := range dtbl { - f, err := fc.getFunction(s.ctx, s.datumsToConstants(t["Input"])) - c.Assert(err, IsNil) - d, err := evalBuiltinFunc(f, chunk.Row{}) + for _, t := range tbl { + var d types.Datum + d.SetString(t.Input, mysql.DefaultCollationName) + f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{d})) c.Assert(err, IsNil) - c.Assert(d, testutil.DatumEquals, t["Expected"][0]) + d, err = evalBuiltinFunc(f, chunk.Row{}) + if t.Error == nil { + c.Assert(d.GetString(), Equals, t.Result) + c.Assert(err, IsNil) + } else { + c.Assert(err, ErrorMatches, ".*The document root must not be followed by other values.*") + } } } diff --git a/expression/builtin_json_vec.go b/expression/builtin_json_vec.go index 86e1d64b09902..767029cd1a141 100644 --- a/expression/builtin_json_vec.go +++ b/expression/builtin_json_vec.go @@ -1170,7 +1170,11 @@ func (b *builtinJSONUnquoteSig) vecEvalString(input *chunk.Chunk, result *chunk. 
result.AppendNull() continue } - str, err := json.UnquoteString(buf.GetString(i)) + str := buf.GetString(i) + if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' && !goJSON.Valid([]byte(str)) { + return json.ErrInvalidJSONText.GenWithStackByArgs("The document root must not be followed by other values.") + } + str, err := json.UnquoteString(str) if err != nil { return err } diff --git a/expression/builtin_time.go b/expression/builtin_time.go index 00c26b9140dbf..ad39498b82764 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -7037,7 +7037,7 @@ func (b *builtinLastDaySig) evalTime(row chunk.Row) (types.Time, bool, error) { } tm := arg year, month := tm.Year(), tm.Month() - if arg.InvalidZero() { + if tm.Month() == 0 || (tm.Day() == 0 && b.ctx.GetSessionVars().SQLMode.HasNoZeroDateMode()) { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, arg.String())) } lastDay := types.GetLastDay(year, month) diff --git a/expression/builtin_time_test.go b/expression/builtin_time_test.go index 050d3fc5c93b3..b8ef7f9d4a36e 100644 --- a/expression/builtin_time_test.go +++ b/expression/builtin_time_test.go @@ -2776,22 +2776,35 @@ func (s *testEvaluatorSuite) TestLastDay(c *C) { c.Assert(result, Equals, test.expect) } - testsNull := []interface{}{ - "0000-00-00", - "1992-13-00", - "2007-10-07 23:59:61", - "2005-00-00", - "2005-00-01", - "2243-01 00:00:00", - 123456789} + var timeData types.Time + timeData.StrToDate(s.ctx.GetSessionVars().StmtCtx, "202010", "%Y%m") + testsNull := []struct { + param interface{} + isNilNoZeroDate bool + isNil bool + }{ + {"0000-00-00", true, true}, + {"1992-13-00", true, true}, + {"2007-10-07 23:59:61", true, true}, + {"2005-00-00", true, true}, + {"2005-00-01", true, true}, + {"2243-01 00:00:00", true, true}, + {123456789, true, true}, + {timeData, true, false}, + } for _, i := range testsNull { - t := []types.Datum{types.NewDatum(i)} + t := []types.Datum{types.NewDatum(i.param)} f, err := fc.getFunction(s.ctx, s.datumsToConstants(t)) c.Assert(err, IsNil) d, err := evalBuiltinFunc(f, chunk.Row{}) c.Assert(err, IsNil) - c.Assert(d.IsNull(), IsTrue) + c.Assert(d.IsNull() == i.isNilNoZeroDate, IsTrue) + s.ctx.GetSessionVars().SQLMode &= ^mysql.ModeNoZeroDate + d, err = evalBuiltinFunc(f, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(d.IsNull() == i.isNil, IsTrue) + s.ctx.GetSessionVars().SQLMode |= mysql.ModeNoZeroDate } } diff --git a/expression/builtin_time_vec.go b/expression/builtin_time_vec.go index 78c93e705f539..bb04fd973d2c2 100644 --- a/expression/builtin_time_vec.go +++ b/expression/builtin_time_vec.go @@ -712,15 +712,15 @@ func (b *builtinLastDaySig) vecEvalTime(input *chunk.Chunk, result *chunk.Column if result.IsNull(i) { continue } - if times[i].InvalidZero() { + tm := times[i] + year, month := tm.Year(), tm.Month() + if tm.Month() == 0 || (tm.Day() == 0 && b.ctx.GetSessionVars().SQLMode.HasNoZeroDateMode()) { if err := handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, times[i].String())); err != nil { return err } result.SetNull(i, true) continue } - tm := times[i] - year, month := tm.Year(), tm.Month() lastDay := types.GetLastDay(year, month) times[i] = types.NewTime(types.FromDate(year, month, lastDay, 0, 0, 0, 0), mysql.TypeDate, types.DefaultFsp) } diff --git a/go.mod b/go.mod index 8c2bce826595d..2502c28eea2fd 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/shirou/gopsutil v3.21.2+incompatible 
github.com/soheilhy/cmux v0.1.4 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210701075128-88f909bcdd3f + github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210706041121-6ca00989ddb4 github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d github.com/twmb/murmur3 v1.1.3 github.com/uber-go/atomic v1.4.0 @@ -76,7 +76,6 @@ require ( golang.org/x/tools v0.1.4 google.golang.org/grpc v1.27.1 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - honnef.co/go/tools v0.2.0 // indirect modernc.org/mathutil v1.2.2 // indirect sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 diff --git a/go.sum b/go.sum index 8cc16ae095365..bb44c6cd99136 100644 --- a/go.sum +++ b/go.sum @@ -558,8 +558,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210701075128-88f909bcdd3f h1:T3zFmJfdvmF+sVUvLsZKJZmCzfkbo0O0DjlbQdmd74A= -github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210701075128-88f909bcdd3f/go.mod h1:crzTwbliZf57xC5ZSzmQx4iMZCLCGhA364to+E2JAPU= +github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210706041121-6ca00989ddb4 h1:kokFPtkpQ2ZH910iuxfmKMk8M9zzCLskN8h1LELWN20= +github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210706041121-6ca00989ddb4/go.mod h1:crzTwbliZf57xC5ZSzmQx4iMZCLCGhA364to+E2JAPU= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d h1:K0XnvsnT6ofLDuM8Rt3PuFQO4p8bNraeHYstspD316g= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d/go.mod h1:Jw9KG11C/23Rr7DW4XWQ7H5xOgGZo6DFL1OKAF4+Igw= github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M= diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 40d859a10689b..4e2894af1be4f 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -271,6 +271,7 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont if err != nil { return errors.Trace(err) } + sctx.GetSessionVars().StmtCtx.IsStaleness = true } if prepared.SchemaVersion != is.SchemaMetaVersion() { // In order to avoid some correctness issues, we have to clear the diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 59c228767171a..aa9e7752de3b5 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -48,6 +48,7 @@ var IsReadOnly func(node ast.Node, vars *variable.SessionVars) bool const ( flagGcSubstitute uint64 = 1 << iota flagPrunColumns + flagStabilizeResults flagBuildKeyInfo flagDecorrelate flagEliminateAgg @@ -65,6 +66,7 @@ const ( var optRuleList = []logicalOptRule{ &gcSubstituter{}, &columnPruner{}, + &resultsStabilizer{}, &buildKeySolver{}, &decorrelateSolver{}, &aggregationEliminator{}, @@ -132,12 +134,21 @@ func CheckTableLock(ctx sessionctx.Context, is infoschema.InfoSchema, vs []visit return nil } +func checkStableResultMode(sctx sessionctx.Context) bool { + s := sctx.GetSessionVars() + st := s.StmtCtx + return s.EnableStableResultMode && (!st.InInsertStmt && !st.InUpdateStmt && !st.InDeleteStmt && !st.InLoadDataStmt) +} + // DoOptimize optimizes a logical plan to a physical plan. 
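+// When tidb_enable_stable_result_mode is on and the statement is not an INSERT/UPDATE/DELETE/LOAD DATA statement (see checkStableResultMode above), the flagStabilizeResults bit is added so that the resultsStabilizer rule runs during logical optimization.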
func DoOptimize(ctx context.Context, sctx sessionctx.Context, flag uint64, logic LogicalPlan) (PhysicalPlan, float64, error) { // if there is something after flagPrunColumns, do flagPrunColumnsAgain if flag&flagPrunColumns > 0 && flag-flagPrunColumns > flagPrunColumns { flag |= flagPrunColumnsAgain } + if checkStableResultMode(sctx) { + flag |= flagStabilizeResults + } logic, err := logicalOptimize(ctx, flag, logic) if err != nil { return nil, 0, err diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 179a9369ce228..3e40e5509d548 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1086,6 +1086,10 @@ func getPossibleAccessPaths(ctx sessionctx.Context, tableHints *tableHintInfo, i } available = removeIgnoredPaths(available, ignored, tblInfo) + if ctx.GetSessionVars().StmtCtx.IsStaleness { + // skip TiFlash if the statement is a stale read, until TiFlash supports stale read + available = removeTiflashDuringStaleRead(available) + } // If we have got "FORCE" or "USE" index hint but got no available index, // we have to use table scan. @@ -1141,6 +1145,17 @@ func removeIgnoredPaths(paths, ignoredPaths []*util.AccessPath, tblInfo *model.T return remainedPaths } +func removeTiflashDuringStaleRead(paths []*util.AccessPath) []*util.AccessPath { + n := 0 + for _, path := range paths { + if path.StoreType != kv.TiFlash { + paths[n] = path + n++ + } + } + return paths[:n] +} + func (b *PlanBuilder) buildSelectLock(src LogicalPlan, lock *ast.SelectLockInfo) (*LogicalLock, error) { selectLock := LogicalLock{ Lock: lock, diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 2c466ac9ddbae..8791d9913a2c6 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -445,6 +445,11 @@ type PointPlanVal struct { // TryFastPlan tries to use the PointGetPlan for the query. func TryFastPlan(ctx sessionctx.Context, node ast.Node) (p Plan) { + if checkStableResultMode(ctx) { + // the result-stabilizing rule has not taken effect yet, so we cannot generate a fast plan in this mode + return nil + } + ctx.GetSessionVars().PlanID = 0 ctx.GetSessionVars().PlanColumnID = 0 switch x := node.(type) { diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index b9e2b421c2cfe..1e367b9d78e6d 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -1501,6 +1501,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { } txnCtx := p.ctx.GetSessionVars().TxnCtx p.TxnScope = txnCtx.TxnScope + // It means we meet the following case: + // 1. start transaction read only as of timestamp ts + // 2. select statement if txnCtx.IsStaleness { p.LastSnapshotTS = txnCtx.StartTS p.IsStaleness = txnCtx.IsStaleness @@ -1517,6 +1520,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { p.err = ErrAsOf.FastGenWithCause("can't use select as of while already set transaction as of") return } + // It means we meet the following case: + // 1. set transaction read only as of timestamp ts + // 2.
select statement if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(sessionctx.Context) (uint64, error) { return ts, nil @@ -1534,6 +1540,8 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { p.err = errors.Trace(err) return } + // It means we meet following case: + // select statement with as of timestamp if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(ctx sessionctx.Context) (uint64, error) { return calculateTsExpr(ctx, node) @@ -1553,6 +1561,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { return } } + if p.flag&inPrepare == 0 { + p.ctx.GetSessionVars().StmtCtx.IsStaleness = p.IsStaleness + } p.initedLastSnapshotTS = true } diff --git a/planner/core/rule_stabilize_results.go b/planner/core/rule_stabilize_results.go new file mode 100644 index 0000000000000..f327bb70a98f3 --- /dev/null +++ b/planner/core/rule_stabilize_results.go @@ -0,0 +1,125 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/util" +) + +/* + resultsStabilizer stabilizes query results. + NOTE: it's not a common rule for all queries, it's specially implemented for a few customers. + + Results of some queries are not stable, for example: + create table t (a int); insert into t values (1), (2); select a from t; + In the case above, the result can be `1 2` or `2 1`, which is not stable. + This rule stabilizes results by modifying or injecting a Sort operator: + 1. iterate the plan from the root, and ignore all input-order operators (Sel/Proj/Limit); + 2. when meeting the first non-input-order operator, + 2.1. if it's a Sort, update it by appending all output columns into its order-by list, + 2.2. otherwise, inject a new Sort upon this operator. 
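+ + For illustration, assuming a table defined as `create table t (a int primary key, b int)`: + `select b from t where b > 0` has only input-order operators (Projection/Selection) above the DataSource, so case 2.2 injects a Sort on the handle column `t.a` above the DataSource; + `select b from t order by b` already contains a Sort, so case 2.1 appends the handle column to its order-by list and ties on `b` are broken deterministically.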
+*/ +type resultsStabilizer struct { +} + +func (rs *resultsStabilizer) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + stable := rs.completeSort(lp) + if !stable { + lp = rs.injectSort(lp) + } + return lp, nil +} + +func (rs *resultsStabilizer) completeSort(lp LogicalPlan) bool { + if rs.isInputOrderKeeper(lp) { + return rs.completeSort(lp.Children()[0]) + } else if sort, ok := lp.(*LogicalSort); ok { + cols := sort.Schema().Columns // sort results by all output columns + if handleCol := rs.extractHandleCol(sort.Children()[0]); handleCol != nil { + cols = []*expression.Column{handleCol} // sort results by the handle column if we can get it + } + for _, col := range cols { + exist := false + for _, byItem := range sort.ByItems { + if col.Equal(nil, byItem.Expr) { + exist = true + break + } + } + if !exist { + sort.ByItems = append(sort.ByItems, &util.ByItems{Expr: col}) + } + } + return true + } + return false +} + +func (rs *resultsStabilizer) injectSort(lp LogicalPlan) LogicalPlan { + if rs.isInputOrderKeeper(lp) { + lp.SetChildren(rs.injectSort(lp.Children()[0])) + return lp + } + + byItems := make([]*util.ByItems, 0, len(lp.Schema().Columns)) + cols := lp.Schema().Columns + if handleCol := rs.extractHandleCol(lp); handleCol != nil { + cols = []*expression.Column{handleCol} + } + for _, col := range cols { + byItems = append(byItems, &util.ByItems{Expr: col}) + } + sort := LogicalSort{ + ByItems: byItems, + }.Init(lp.SCtx(), lp.SelectBlockOffset()) + sort.SetChildren(lp) + return sort +} + +func (rs *resultsStabilizer) isInputOrderKeeper(lp LogicalPlan) bool { + switch lp.(type) { + case *LogicalSelection, *LogicalProjection, *LogicalLimit: + return true + } + return false +} + +// extractHandleCols does the best effort to get the handle column. +func (rs *resultsStabilizer) extractHandleCol(lp LogicalPlan) *expression.Column { + switch x := lp.(type) { + case *LogicalSelection, *LogicalLimit: + handleCol := rs.extractHandleCol(lp.Children()[0]) + if x.Schema().Contains(handleCol) { + // some Projection Operator might be inlined, so check the column again here + return handleCol + } + case *DataSource: + if x.tableInfo.IsCommonHandle { + // Currently we deliberately don't support common handle case for simplicity. + return nil + } + handleCol := x.getPKIsHandleCol() + if handleCol != nil { + return handleCol + } + } + return nil +} + +func (rs *resultsStabilizer) name() string { + return "stabilize_results" +} diff --git a/planner/core/rule_stabilize_results_test.go b/planner/core/rule_stabilize_results_test.go new file mode 100644 index 0000000000000..00b3cf1fb12e5 --- /dev/null +++ b/planner/core/rule_stabilize_results_test.go @@ -0,0 +1,218 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + "fmt" + "math" + "strings" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testRuleStabilizeResults{}) +var _ = SerialSuites(&testRuleStabilizeResultsSerial{}) + +type testRuleStabilizeResultsSerial struct { + store kv.Storage + dom *domain.Domain +} + +func (s *testRuleStabilizeResultsSerial) SetUpTest(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) +} + +func (s *testRuleStabilizeResultsSerial) TestPlanCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + var err error + tk.Se, err = session.CreateSession4TestWithOpt(s.store, &session.Opt{ + PreparedPlanCache: kvcache.NewSimpleLRUCache(100, 0.1, math.MaxUint64), + }) + c.Assert(err, IsNil) + + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("prepare s1 from 'select * from t where a > ? limit 10'") + tk.MustExec("set @a = 10") + tk.MustQuery("execute s1 using @a").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustQuery("execute s1 using @a").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) // plan cache is still working +} + +func (s *testRuleStabilizeResultsSerial) TestSQLBinding(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + tk.MustQuery("explain select * from t where a > 0 limit 1").Check(testkit.Rows( + "Limit_12 1.00 root offset:0, count:1", + "└─TableReader_22 1.00 root data:Limit_21", + " └─Limit_21 1.00 cop[tikv] offset:0, count:1", + " └─TableRangeScan_20 1.00 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo")) + + tk.MustExec("create session binding for select * from t where a>0 limit 1 using select * from t use index(b) where a>0 limit 1") + tk.MustQuery("explain select * from t where a > 0 limit 1").Check(testkit.Rows( + "TopN_9 1.00 root test.t.a, offset:0, count:1", + "└─IndexLookUp_19 1.00 root ", + " ├─TopN_18(Build) 1.00 cop[tikv] test.t.a, offset:0, count:1", + " │ └─Selection_17 3333.33 cop[tikv] gt(test.t.a, 0)", + " │ └─IndexFullScan_15 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_16(Probe) 1.00 cop[tikv] table:t keep order:false, stats:pseudo")) +} + +func (s *testRuleStabilizeResultsSerial) TestClusteredIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a int,b int,c int, PRIMARY KEY (a,b))") + tk.MustQuery("explain select * from t limit 10").Check(testkit.Rows( + "TopN_7 10.00 root test.t.a, test.t.b, test.t.c, offset:0, count:10", + "└─TableReader_16 10.00 root 
data:TopN_15", + " └─TopN_15 10.00 cop[tikv] test.t.a, test.t.b, test.t.c, offset:0, count:10", + " └─TableFullScan_14 10000.00 cop[tikv] table:t keep order:false, stats:pseudo")) + tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff +} + +type testRuleStabilizeResults struct { + store kv.Storage + dom *domain.Domain + + testData testutil.TestData +} + +func (s *testRuleStabilizeResults) SetUpSuite(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) + + s.testData, err = testutil.LoadTestSuiteData("testdata", "stable_result_mode_suite") + c.Assert(err, IsNil) +} + +func (s *testRuleStabilizeResults) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testRuleStabilizeResults) runTestData(c *C, tk *testkit.TestKit, name string) { + var input []string + var output []struct { + Plan []string + } + s.testData.GetTestCasesByName(name, c, &input, &output) + c.Assert(len(input), Equals, len(output)) + for i := range input { + s.testData.OnRecord(func() { + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + input[i]).Rows()) + }) + tk.MustQuery("explain " + input[i]).Check(testkit.Rows(output[i].Plan...)) + } +} + +func (s *testRuleStabilizeResults) TestStableResultMode(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultMode") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnDML(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnDML") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnSubQuery(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnSubQuery") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnJoin") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnOtherOperators(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, unique key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, unique key(b))") + s.runTestData(c, tk, "TestStableResultModeOnOtherOperators") +} + +func (s *testRuleStabilizeResults) 
TestStableResultModeOnPartitionTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(fmt.Sprintf(`set tidb_partition_prune_mode='%v'`, variable.DefTiDBPartitionPruneMode)) + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists thash") + tk.MustExec("drop table if exists trange") + tk.MustExec("create table thash (a int primary key, b int, c int, d int) partition by hash(a) partitions 4") + tk.MustExec(`create table trange (a int primary key, b int, c int, d int) partition by range(a) ( + partition p0 values less than (100), + partition p1 values less than (200), + partition p2 values less than (300), + partition p3 values less than (400))`) + tk.MustQuery("select @@tidb_partition_prune_mode").Check(testkit.Rows("static")) + s.runTestData(c, tk, "TestStableResultModeOnPartitionTable") +} + +func (s *testRuleStabilizeResults) TestHideStableResultSwitch(c *C) { + tk := testkit.NewTestKit(c, s.store) + rs := tk.MustQuery("show variables").Rows() + for _, r := range rs { + c.Assert(strings.ToLower(r[0].(string)), Not(Equals), "tidb_enable_stable_result_mode") + } + c.Assert(len(tk.MustQuery("show variables where variable_name like '%tidb_enable_stable_result_mode%'").Rows()), Equals, 0) +} diff --git a/planner/core/testdata/stable_result_mode_suite_in.json b/planner/core/testdata/stable_result_mode_suite_in.json new file mode 100644 index 0000000000000..7629e80fc3630 --- /dev/null +++ b/planner/core/testdata/stable_result_mode_suite_in.json @@ -0,0 +1,83 @@ +[ + { + "name": "TestStableResultMode", + "cases": [ + "select * from t use index(primary)", + "select b from t use index(b)", + "select a, b from t use index(b)", + "select b, c from t use index(b)", + "select b, c from t use index(primary)", + "select min(b), max(c) from t use index(primary) group by d", + "select min(b), max(c) from t use index(primary) group by a", + "select * from t use index(b) limit 10", + "select * from t use index(primary) limit 10", + "select b from t use index(b) order by b", + "select b, c, d from t use index(b) order by b", + "select t1.a, t2.a from t t1, t t2 where t1.a=t2.a", + "select b from t where a>0", + "select b from t where a>0 limit 1" + ] + }, + { + "name": "TestStableResultModeOnDML", + "cases": [ + "insert into t select * from t", + "insert into t select * from t where a>1", + "insert into t select t1.a, t2.b, t1.c+t2.c from t t1, t t2 where t1.a=t2.a", + "insert into t select min(a), max(b), sum(c) from t group by a", + "delete from t", + "delete from t where a>1", + "update t set a=a+1", + "update t set a=a+1 where a>1" + ] + }, + { + "name": "TestStableResultModeOnSubQuery", + "cases": [ + "select * from t1 where t1.a in (select b from t2)", + "select * from t1 where t1.a not in (select b from t2)", + "select * from t1 where t1.a in (select b from t2 where t2.c>t1.c)", + "select * from t1 where t1.a not in (select b from t2 where t2.c>t1.c)", + "select * from t1 where exists (select 1 from t2 where t2.c>t1.c)", + "select * from t1 where not exists (select 1 from t2 where t2.c>t1.c)", + "select * from t1 where exists (select 1 from t2 where t2.c=t1.c)", + "select * from t1 where not exists (select 1 from t2 where t2.c=t1.c)", + "select t1.* from t1, (select b from t2) tb where t1.b=tb.b" + ] + }, + { + "name": "TestStableResultModeOnJoin", + "cases": [ + "select * from t1, t2 where t1.a = t2.a", + "select * from t1, t2 where t1.a > t2.a and t1.b = t2.b and t1.c < t2.c", + "select t1.* from t1 left outer join t2 on 
t1.a=t2.a", + "select t1.* from t1 join t2 on t1.a!=t2.a" + ] + }, + { + "name": "TestStableResultModeOnOtherOperators", + "cases": [ + "select * from t1 where a = 1 or a = 222 or a = 33333", + "select * from t1 where a in (1, 2, 3, 4)", + "select b from t1 where b = 1 or b = 222 or b = 33333", + "select b from t1 where b in (1, 2, 3, 4)", + "select * from t1 where a > 10 union all select * from t2 where b > 20", + "select * from t1 where a > 10 union distinct select * from t2 where b > 20", + "select * from t1 where a > 10 intersect select * from t2 where b > 20", + "select * from t1 where a > 10 except select * from t2 where b > 20", + "select row_number() over(partition by a) as row_no, sum(b) over(partition by a) as sum_b from t1", + "select min(a), max(b), sum(c) from t1 group by d", + "select min(a), max(b), sum(c) from t1 group by d having max(b) < 20", + "select case when a=1 then 'a1' when a=2 then 'a2' else 'ax' end from t1 " + ] + }, + { + "name": "TestStableResultModeOnPartitionTable", + "cases": [ + "select * from thash where a in (1, 200)", + "select * from thash where a >= 50 and a <= 150", + "select * from trange where a in (1, 200)", + "select * from trange where a >= 50 and a <= 150" + ] + } +] diff --git a/planner/core/testdata/stable_result_mode_suite_out.json b/planner/core/testdata/stable_result_mode_suite_out.json new file mode 100644 index 0000000000000..6e00e3e1c65db --- /dev/null +++ b/planner/core/testdata/stable_result_mode_suite_out.json @@ -0,0 +1,485 @@ +[ + { + "Name": "TestStableResultMode", + "Cases": [ + { + "Plan": [ + "TableReader_10 10000.00 root data:TableFullScan_9", + "└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "IndexReader_10 10000.00 root index:IndexFullScan_9", + "└─IndexFullScan_9 10000.00 cop[tikv] table:t, index:b(b) keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.a", + "└─IndexReader_8 10000.00 root index:IndexFullScan_7", + " └─IndexFullScan_7 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.b, test.t.c", + "└─IndexLookUp_9 10000.00 root ", + " ├─IndexFullScan_7(Build) 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.b, test.t.c", + "└─TableReader_8 10000.00 root data:TableFullScan_7", + " └─TableFullScan_7 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_6 8000.00 root Column#5, Column#6", + "└─HashAgg_12 8000.00 root group by:test.t.d, funcs:min(Column#7)->Column#5, funcs:max(Column#8)->Column#6", + " └─TableReader_13 8000.00 root data:HashAgg_8", + " └─HashAgg_8 8000.00 cop[tikv] group by:test.t.d, funcs:min(test.t.b)->Column#7, funcs:max(test.t.c)->Column#8", + " └─TableFullScan_11 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_7 10000.00 root test.t.b, test.t.c", + "└─TableReader_10 10000.00 root data:TableFullScan_9", + " └─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "TopN_8 10.00 root test.t.a, offset:0, count:10", + "└─IndexLookUp_17 10.00 root ", + " ├─TopN_16(Build) 10.00 cop[tikv] test.t.a, offset:0, count:10", + " │ └─IndexFullScan_14 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_15(Probe) 10.00 
cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Limit_11 10.00 root offset:0, count:10", + "└─TableReader_21 10.00 root data:Limit_20", + " └─Limit_20 10.00 cop[tikv] offset:0, count:10", + " └─TableFullScan_19 10.00 cop[tikv] table:t keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "IndexReader_11 10000.00 root index:IndexFullScan_10", + "└─IndexFullScan_10 10000.00 cop[tikv] table:t, index:b(b) keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_4 10000.00 root test.t.b, test.t.c, test.t.d", + "└─IndexLookUp_9 10000.00 root ", + " ├─IndexFullScan_7(Build) 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 12500.00 root test.t.a, test.t.a", + "└─HashJoin_30 12500.00 root inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─IndexReader_43(Build) 10000.00 root index:IndexFullScan_42", + " │ └─IndexFullScan_42 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─IndexReader_39(Probe) 10000.00 root index:IndexFullScan_38", + " └─IndexFullScan_38 10000.00 cop[tikv] table:t1, index:b(b) keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_5 3333.33 root test.t.b", + "└─TableReader_11 3333.33 root data:TableRangeScan_10", + " └─TableRangeScan_10 3333.33 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_7 1.00 root test.t.b", + "└─Limit_12 1.00 root offset:0, count:1", + " └─TableReader_22 1.00 root data:Limit_21", + " └─Limit_21 1.00 cop[tikv] offset:0, count:1", + " └─TableRangeScan_20 1.00 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnDML", + "Cases": [ + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─TableReader_7 10000.00 root data:TableFullScan_6", + " └─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─TableReader_8 3333.33 root data:TableRangeScan_7", + " └─TableRangeScan_7 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─Projection_9 12500.00 root test.t.a, test.t.b, plus(test.t.c, test.t.c)->Column#10", + " └─MergeJoin_10 12500.00 root inner join, left key:test.t.a, right key:test.t.a", + " ├─TableReader_34(Build) 10000.00 root data:TableFullScan_33", + " │ └─TableFullScan_33 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo", + " └─TableReader_32(Probe) 10000.00 root data:TableFullScan_31", + " └─TableFullScan_31 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─Projection_7 10000.00 root test.t.a, test.t.b, cast(test.t.c, decimal(32,0) BINARY)->Column#9", + " └─TableReader_9 10000.00 root data:TableFullScan_8", + " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Delete_3 N/A root N/A", + "└─TableReader_6 10000.00 root data:TableFullScan_5", + " └─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Delete_4 N/A root N/A", + "└─TableReader_7 3333.33 root data:TableRangeScan_6", + " └─TableRangeScan_6 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Update_3 N/A root N/A", + "└─TableReader_6 10000.00 root data:TableFullScan_5", + " 
└─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Update_4 N/A root N/A", + "└─TableReader_7 3333.33 root data:TableRangeScan_6", + " └─TableRangeScan_6 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnSubQuery", + "Cases": [ + { + "Plan": [ + "Sort_11 9990.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_23 9990.00 root inner join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─HashAgg_36(Build) 7992.00 root group by:test.t2.b, funcs:firstrow(test.t2.b)->test.t2.b", + " │ └─IndexReader_43 9990.00 root index:IndexFullScan_42", + " │ └─IndexFullScan_42 9990.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_47(Probe) 10000.00 root data:TableFullScan_46", + " └─TableFullScan_46 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_11 8000.00 root CARTESIAN anti semi join, other cond:eq(test.t1.a, test.t2.b)", + " ├─IndexReader_17(Build) 10000.00 root index:IndexFullScan_16", + " │ └─IndexFullScan_16 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_13(Probe) 10000.00 root data:TableFullScan_12", + " └─TableFullScan_12 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_29 7992.00 root semi join, equal:[eq(test.t1.a, test.t2.b)], other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_43(Build) 9980.01 root data:Selection_42", + " │ └─Selection_42 9980.01 cop[tikv] not(isnull(test.t2.b)), not(isnull(test.t2.c))", + " │ └─TableFullScan_41 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_40(Probe) 9990.00 root data:Selection_39", + " └─Selection_39 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_38 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root CARTESIAN anti semi join, other cond:eq(test.t1.a, test.t2.b), gt(test.t2.c, test.t1.c)", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 7992.00 root CARTESIAN semi join, other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_18(Build) 9990.00 root data:Selection_17", + " │ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t2.c))", + " │ └─TableFullScan_16 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_15(Probe) 9990.00 root data:Selection_14", + " └─Selection_14 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root CARTESIAN anti semi join, other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " 
└─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 7992.00 root semi join, equal:[eq(test.t1.c, test.t2.c)]", + " ├─TableReader_18(Build) 9990.00 root data:Selection_17", + " │ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t2.c))", + " │ └─TableFullScan_16 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_15(Probe) 9990.00 root data:Selection_14", + " └─Selection_14 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root anti semi join, equal:[eq(test.t1.c, test.t2.c)]", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_9 12487.50 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─Sort_10 12487.50 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.b", + " └─HashJoin_37 12487.50 root inner join, equal:[eq(test.t1.b, test.t2.b)]", + " ├─IndexReader_51(Build) 9990.00 root index:IndexFullScan_50", + " │ └─IndexFullScan_50 9990.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_46(Probe) 9990.00 root data:Selection_45", + " └─Selection_45 9990.00 cop[tikv] not(isnull(test.t1.b))", + " └─TableFullScan_44 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnJoin", + "Cases": [ + { + "Plan": [ + "Sort_9 12500.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a, test.t2.b, test.t2.c, test.t2.d", + "└─MergeJoin_11 12500.00 root inner join, left key:test.t1.a, right key:test.t2.a", + " ├─TableReader_35(Build) 10000.00 root data:TableFullScan_34", + " │ └─TableFullScan_34 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo", + " └─TableReader_33(Probe) 10000.00 root data:TableFullScan_32", + " └─TableFullScan_32 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 12475.01 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a, test.t2.b, test.t2.c, test.t2.d", + "└─HashJoin_42 12475.01 root inner join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t1.a, test.t2.a), lt(test.t1.c, test.t2.c)", + " ├─TableReader_61(Build) 9980.01 root data:Selection_60", + " │ └─Selection_60 9980.01 cop[tikv] not(isnull(test.t2.b)), not(isnull(test.t2.c))", + " │ └─TableFullScan_59 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_54(Probe) 9980.01 root data:Selection_53", + " └─Selection_53 9980.01 cop[tikv] not(isnull(test.t1.b)), not(isnull(test.t1.c))", + " └─TableFullScan_52 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_7 12500.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_19 12500.00 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─IndexReader_30(Build) 10000.00 root index:IndexFullScan_29", + " │ └─IndexFullScan_29 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_26(Probe) 10000.00 root 
data:TableFullScan_25", + " └─TableFullScan_25 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_8 100000000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─Sort_9 100000000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a", + " └─HashJoin_11 100000000.00 root CARTESIAN inner join, other cond:ne(test.t1.a, test.t2.a)", + " ├─IndexReader_18(Build) 10000.00 root index:IndexFullScan_17", + " │ └─IndexFullScan_17 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnOtherOperators", + "Cases": [ + { + "Plan": [ + "Batch_Point_Get_9 3.00 root table:t1 handle:[1 222 33333], keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 4.00 root table:t1 handle:[1 2 3 4], keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 3.00 root table:t1, index:b(b) keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 4.00 root table:t1, index:b(b) keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Sort_11 6666.67 root Column#9, Column#10, Column#11, Column#12", + "└─Union_13 6666.67 root ", + " ├─TableReader_16 3333.33 root data:TableRangeScan_15", + " │ └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo", + " └─TableReader_20 3333.33 root data:Selection_19", + " └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_12 5333.33 root Column#9, Column#10, Column#11, Column#12", + "└─HashAgg_14 5333.33 root group by:Column#10, Column#11, Column#12, Column#9, funcs:firstrow(Column#9)->Column#9, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11, funcs:firstrow(Column#12)->Column#12", + " └─Union_15 6666.67 root ", + " ├─TableReader_18 3333.33 root data:TableRangeScan_17", + " │ └─TableRangeScan_17 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo", + " └─TableReader_22 3333.33 root data:Selection_21", + " └─Selection_21 3333.33 cop[tikv] gt(test.t2.b, 20)", + " └─TableFullScan_20 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_11 2666.67 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_13 2666.67 root CARTESIAN semi join, other cond:nulleq(test.t1.a, test.t2.a), nulleq(test.t1.b, test.t2.b), nulleq(test.t1.c, test.t2.c), nulleq(test.t1.d, test.t2.d)", + " ├─TableReader_20(Build) 3333.33 root data:Selection_19", + " │ └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " │ └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_16(Probe) 3333.33 root data:TableRangeScan_15", + " └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_11 2666.67 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_13 2666.67 root anti semi join, equal:[nulleq(test.t1.a, test.t2.a) nulleq(test.t1.b, test.t2.b) nulleq(test.t1.c, test.t2.c) nulleq(test.t1.d, test.t2.d)]", + " ├─TableReader_20(Build) 3333.33 root data:Selection_19", + " │ └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " │ └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " 
└─TableReader_16(Probe) 3333.33 root data:TableRangeScan_15", + " └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_10 10000.00 root Column#8, Column#7", + "└─Sort_11 10000.00 root test.t1.a, Column#7, Column#8", + " └─Window_13 10000.00 root row_number()->Column#8 over(partition by test.t1.a rows between current row and current row)", + " └─Window_14 10000.00 root sum(cast(test.t1.b, decimal(32,0) BINARY))->Column#7 over(partition by test.t1.a)", + " └─TableReader_17 10000.00 root data:TableFullScan_16", + " └─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_6 8000.00 root Column#5, Column#6, Column#7", + "└─HashAgg_12 8000.00 root group by:test.t1.d, funcs:min(Column#8)->Column#5, funcs:max(Column#9)->Column#6, funcs:sum(Column#10)->Column#7", + " └─TableReader_13 8000.00 root data:HashAgg_8", + " └─HashAgg_8 8000.00 cop[tikv] group by:test.t1.d, funcs:min(test.t1.a)->Column#8, funcs:max(test.t1.b)->Column#9, funcs:sum(test.t1.c)->Column#10", + " └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 6400.00 root Column#5, Column#6, Column#7", + "└─Selection_11 6400.00 root lt(Column#6, 20)", + " └─HashAgg_16 8000.00 root group by:test.t1.d, funcs:min(Column#11)->Column#5, funcs:max(Column#12)->Column#6, funcs:sum(Column#13)->Column#7", + " └─TableReader_17 8000.00 root data:HashAgg_12", + " └─HashAgg_12 8000.00 cop[tikv] group by:test.t1.d, funcs:min(test.t1.a)->Column#11, funcs:max(test.t1.b)->Column#12, funcs:sum(test.t1.c)->Column#13", + " └─TableFullScan_15 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_4 10000.00 root case(eq(test.t1.a, 1), a1, eq(test.t1.a, 2), a2, ax)->Column#5", + "└─TableReader_12 10000.00 root data:TableFullScan_11", + " └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnPartitionTable", + "Cases": [ + { + "Plan": [ + "Sort_10 4.00 root test.thash.a", + "└─PartitionUnion_12 4.00 root ", + " ├─Batch_Point_Get_13 2.00 root table:thash handle:[1 200], keep order:false, desc:false", + " └─Batch_Point_Get_14 2.00 root table:thash handle:[1 200], keep order:false, desc:false" + ] + }, + { + "Plan": [ + "Sort_12 400.00 root test.thash.a", + "└─PartitionUnion_14 400.00 root ", + " ├─TableReader_16 100.00 root data:TableRangeScan_15", + " │ └─TableRangeScan_15 100.00 cop[tikv] table:thash, partition:p0 range:[50,150], keep order:false, stats:pseudo", + " ├─TableReader_18 100.00 root data:TableRangeScan_17", + " │ └─TableRangeScan_17 100.00 cop[tikv] table:thash, partition:p1 range:[50,150], keep order:false, stats:pseudo", + " ├─TableReader_20 100.00 root data:TableRangeScan_19", + " │ └─TableRangeScan_19 100.00 cop[tikv] table:thash, partition:p2 range:[50,150], keep order:false, stats:pseudo", + " └─TableReader_22 100.00 root data:TableRangeScan_21", + " └─TableRangeScan_21 100.00 cop[tikv] table:thash, partition:p3 range:[50,150], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 4.00 root test.trange.a", + "└─PartitionUnion_12 4.00 root ", + " ├─TableReader_14 2.00 root data:TableRangeScan_13", + " │ └─TableRangeScan_13 2.00 cop[tikv] table:trange, partition:p0 range:[1,1], [200,200], keep order:false, stats:pseudo", + " └─TableReader_16 2.00 root data:TableRangeScan_15", + " └─TableRangeScan_15 2.00 cop[tikv] 
table:trange, partition:p2 range:[1,1], [200,200], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 200.00 root test.trange.a", + "└─PartitionUnion_12 200.00 root ", + " ├─TableReader_14 100.00 root data:TableRangeScan_13", + " │ └─TableRangeScan_13 100.00 cop[tikv] table:trange, partition:p0 range:[50,150], keep order:false, stats:pseudo", + " └─TableReader_16 100.00 root data:TableRangeScan_15", + " └─TableRangeScan_15 100.00 cop[tikv] table:trange, partition:p1 range:[50,150], keep order:false, stats:pseudo" + ] + } + ] + } +] diff --git a/privilege/privilege.go b/privilege/privilege.go index f3fbade9cfc0b..b7a098171708e 100644 --- a/privilege/privilege.go +++ b/privilege/privilege.go @@ -50,7 +50,7 @@ type Manager interface { // Dynamic privileges are only assignable globally, and have their own grantable attribute. RequestDynamicVerification(activeRoles []*auth.RoleIdentity, privName string, grantable bool) bool - // RequestDynamicVerification verifies a DYNAMIC privilege for a specific user. + // RequestDynamicVerificationWithUser verifies a DYNAMIC privilege for a specific user. RequestDynamicVerificationWithUser(privName string, grantable bool, user *auth.UserIdentity) bool // ConnectionVerification verifies user privilege for connection. diff --git a/server/conn.go b/server/conn.go index 130e7768bf702..b73e51543b13f 100644 --- a/server/conn.go +++ b/server/conn.go @@ -185,6 +185,7 @@ type clientConn struct { collation uint8 // collation used by client, may be different from the collation used by database. lastActive time.Time // last active time authPlugin string // default authentication plugin + isUnixSocket bool // connection is Unix Socket file // mu is used for cancelling the execution of current transaction. mu struct { @@ -837,7 +838,7 @@ func (cc *clientConn) PeerHost(hasPassword string) (host, port string, err error return cc.peerHost, "", nil } host = variable.DefHostname - if cc.server.isUnixSocket() { + if cc.isUnixSocket { cc.peerHost = host return } diff --git a/server/server.go b/server/server.go index 177e5f94b311d..9b6f6314d2f68 100644 --- a/server/server.go +++ b/server/server.go @@ -33,7 +33,6 @@ import ( "crypto/tls" "flag" "fmt" - "io" "math/rand" "net" "net/http" @@ -184,44 +183,6 @@ func (s *Server) newConn(conn net.Conn) *clientConn { return cc } -// isUnixSocket should ideally be a function of clientConnection! -// But currently since unix-socket connections are forwarded to TCP when the server listens on both, it can really only be accurate on a server-level. -// If the server is listening on both, it *must* return FALSE for remote-host authentication to be performed correctly. See #23460. 
-func (s *Server) isUnixSocket() bool { - return s.cfg.Socket != "" && s.cfg.Port == 0 -} - -func (s *Server) forwardUnixSocketToTCP() { - addr := fmt.Sprintf("%s:%d", s.cfg.Host, s.cfg.Port) - for { - if s.listener == nil { - return // server shutdown has started - } - if uconn, err := s.socket.Accept(); err == nil { - logutil.BgLogger().Info("server socket forwarding", zap.String("from", s.cfg.Socket), zap.String("to", addr)) - go s.handleForwardedConnection(uconn, addr) - } else if s.listener != nil { - logutil.BgLogger().Error("server failed to forward", zap.String("from", s.cfg.Socket), zap.String("to", addr), zap.Error(err)) - } - } -} - -func (s *Server) handleForwardedConnection(uconn net.Conn, addr string) { - defer terror.Call(uconn.Close) - if tconn, err := net.Dial("tcp", addr); err == nil { - go func() { - if _, err := io.Copy(uconn, tconn); err != nil { - logutil.BgLogger().Warn("copy server to socket failed", zap.Error(err)) - } - }() - if _, err := io.Copy(tconn, uconn); err != nil { - logutil.BgLogger().Warn("socket forward copy failed", zap.Error(err)) - } - } else { - logutil.BgLogger().Warn("socket forward failed: could not connect", zap.String("addr", addr), zap.Error(err)) - } -} - // NewServer creates a new Server. func NewServer(cfg *config.Config, driver IDriver) (*Server, error) { s := &Server{ @@ -257,42 +218,52 @@ func NewServer(cfg *config.Config, driver IDriver) (*Server, error) { if s.cfg.EnableTCP4Only { tcpProto = "tcp4" } - if s.listener, err = net.Listen(tcpProto, addr); err == nil { - logutil.BgLogger().Info("server is running MySQL protocol", zap.String("addr", addr)) - if cfg.Socket != "" { - if s.socket, err = net.Listen("unix", s.cfg.Socket); err == nil { - logutil.BgLogger().Info("server redirecting", zap.String("from", s.cfg.Socket), zap.String("to", addr)) - go s.forwardUnixSocketToTCP() - } - } - if runInGoTest && s.cfg.Port == 0 { - s.cfg.Port = uint(s.listener.Addr().(*net.TCPAddr).Port) - } + if s.listener, err = net.Listen(tcpProto, addr); err != nil { + return nil, errors.Trace(err) } - } else if cfg.Socket != "" { - if s.listener, err = net.Listen("unix", cfg.Socket); err == nil { - logutil.BgLogger().Info("server is running MySQL protocol", zap.String("socket", cfg.Socket)) + logutil.BgLogger().Info("server is running MySQL protocol", zap.String("addr", addr)) + if runInGoTest && s.cfg.Port == 0 { + s.cfg.Port = uint(s.listener.Addr().(*net.TCPAddr).Port) } - } else { + } + + if s.cfg.Socket != "" { + if s.socket, err = net.Listen("unix", s.cfg.Socket); err != nil { + return nil, errors.Trace(err) + } + logutil.BgLogger().Info("server is running MySQL protocol", zap.String("socket", s.cfg.Socket)) + } + + if s.socket == nil && s.listener == nil { err = errors.New("Server not configured to listen on either -socket or -host and -port") + return nil, errors.Trace(err) } - if cfg.ProxyProtocol.Networks != "" { - pplistener, errProxy := proxyprotocol.NewListener(s.listener, cfg.ProxyProtocol.Networks, - int(cfg.ProxyProtocol.HeaderTimeout)) - if errProxy != nil { + if s.cfg.ProxyProtocol.Networks != "" { + proxyTarget := s.listener + if proxyTarget == nil { + proxyTarget = s.socket + } + pplistener, err := proxyprotocol.NewListener(proxyTarget, s.cfg.ProxyProtocol.Networks, + int(s.cfg.ProxyProtocol.HeaderTimeout)) + if err != nil { logutil.BgLogger().Error("ProxyProtocol networks parameter invalid") - return nil, errors.Trace(errProxy) + return nil, errors.Trace(err) + } + if s.listener != nil { + s.listener = pplistener + 
logutil.BgLogger().Info("server is running MySQL protocol (through PROXY protocol)", zap.String("host", s.cfg.Host)) + } else { + s.socket = pplistener + logutil.BgLogger().Info("server is running MySQL protocol (through PROXY protocol)", zap.String("socket", s.cfg.Socket)) } - logutil.BgLogger().Info("server is running MySQL protocol (through PROXY protocol)", zap.String("host", s.cfg.Host)) - s.listener = pplistener } - if s.cfg.Status.ReportStatus && err == nil { + if s.cfg.Status.ReportStatus { err = s.listenStatusHTTPServer() - } - if err != nil { - return nil, errors.Trace(err) + if err != nil { + return nil, errors.Trace(err) + } } // Init rand seed for randomBuf() @@ -336,12 +307,34 @@ func (s *Server) Run() error { if s.cfg.Status.ReportStatus { s.startStatusHTTP() } + // If error should be reported and exit the server it can be sent on this + // channel. Otherwise end with sending a nil error to signal "done" + errChan := make(chan error) + go s.startNetworkListener(s.listener, false, errChan) + go s.startNetworkListener(s.socket, true, errChan) + err := <-errChan + if err != nil { + return err + } + return <-errChan +} + +func (s *Server) startNetworkListener(listener net.Listener, isUnixSocket bool, errChan chan error) { + if listener == nil { + errChan <- nil + return + } for { - conn, err := s.listener.Accept() + conn, err := listener.Accept() if err != nil { if opErr, ok := err.(*net.OpError); ok { if opErr.Err.Error() == "use of closed network connection" { - return nil + if s.inShutdownMode { + errChan <- nil + } else { + errChan <- err + } + return } } @@ -352,10 +345,14 @@ func (s *Server) Run() error { } logutil.BgLogger().Error("accept failed", zap.Error(err)) - return errors.Trace(err) + errChan <- err + return } clientConn := s.newConn(conn) + if isUnixSocket { + clientConn.isUnixSocket = true + } err = plugin.ForeachPlugin(plugin.Audit, func(p *plugin.Plugin) error { authPlugin := plugin.DeclareAuditManifest(p.Manifest) @@ -506,7 +503,7 @@ func (s *Server) onConn(conn *clientConn) { func (cc *clientConn) connectInfo() *variable.ConnectionInfo { connType := "Socket" - if cc.server.isUnixSocket() { + if cc.isUnixSocket { connType = "UnixSocket" } else if cc.tlsConn != nil { connType = "SSL/TLS" diff --git a/server/server_test.go b/server/server_test.go index 79a1b805f32f9..4bfc6d94719f6 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -131,14 +131,9 @@ func (cli *testServerClient) runTests(c *C, overrider configOverrider, tests ... c.Assert(err, IsNil) }() - _, err = db.Exec("DROP TABLE IF EXISTS test") - c.Assert(err, IsNil) - dbt := &DBTest{c, db} for _, test := range tests { test(dbt) - // fixed query error - _, _ = dbt.db.Exec("DROP TABLE IF EXISTS test") } } diff --git a/server/tidb_test.go b/server/tidb_test.go index 15eb7f53b29fd..48741e3018ce2 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -25,6 +25,7 @@ import ( "database/sql" "encoding/pem" "fmt" + "io/ioutil" "math/big" "net/http" "os" @@ -304,7 +305,6 @@ func (ts *tidbTestSuite) TestStatusAPIWithTLS(c *C) { c.Assert(err, IsNil) }() time.Sleep(time.Millisecond * 100) - c.Assert(server.isUnixSocket(), IsFalse) // If listening on tcp-only, return FALSE // https connection should work. 
ts.runTestStatusAPI(c) @@ -411,7 +411,6 @@ func (ts *tidbTestSuite) TestSocketForwarding(c *C) { c.Assert(err, IsNil) }() time.Sleep(time.Millisecond * 100) - c.Assert(server.isUnixSocket(), IsFalse) // If listening on both, return FALSE defer server.Close() cli.runTestRegression(c, func(config *mysql.Config) { @@ -438,7 +437,6 @@ func (ts *tidbTestSuite) TestSocket(c *C) { c.Assert(err, IsNil) }() time.Sleep(time.Millisecond * 100) - c.Assert(server.isUnixSocket(), IsTrue) // If listening on socket-only, return TRUE defer server.Close() // a fake server client, config is override, just used to run tests @@ -453,6 +451,302 @@ func (ts *tidbTestSuite) TestSocket(c *C) { } +func (ts *tidbTestSuite) TestSocketAndIp(c *C) { + osTempDir := os.TempDir() + tempDir, err := ioutil.TempDir(osTempDir, "tidb-test.*.socket") + c.Assert(err, IsNil) + socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK + defer os.RemoveAll(tempDir) + cli := newTestServerClient() + cfg := newTestConfig() + cfg.Socket = socketFile + cfg.Port = cli.port + cfg.Status.ReportStatus = false + + server, err := NewServer(cfg, ts.tidbdrv) + c.Assert(err, IsNil) + cli.port = getPortFromTCPAddr(server.listener.Addr()) + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() + time.Sleep(time.Millisecond * 100) + defer server.Close() + + // Test with Socket connection + Setup user1@% for all host access + cli.port = getPortFromTCPAddr(server.listener.Addr()) + defer func() { + cli.runTests(c, func(config *mysql.Config) { + config.User = "root" + }, + func(dbt *DBTest) { + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'%'") + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'localhost'") + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'127.0.0.1'") + }) + }() + cli.runTests(c, func(config *mysql.Config) { + config.User = "root" + config.Net = "unix" + config.Addr = socketFile + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "root@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@'%'") + dbt.mustQuery("GRANT SELECT ON test.* TO user1@'%'") + }) + // Test with Network interface connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + // NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1) + cli.checkRows(c, rows, "user1@127.0.0.1") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT Select ON test.* TO 'user1'@'%'") + }) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT Select ON test.* TO 'user1'@'%'") + }) + + // Setup user1@127.0.0.1 for loop back network interface access + cli.runTests(c, func(config *mysql.Config) { + config.User = "root" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + // NOTICE: this is not compatible with MySQL! 
(MySQL would report user1@localhost also for 127.0.0.1) + cli.checkRows(c, rows, "root@127.0.0.1") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@127.0.0.1") + dbt.mustQuery("GRANT SELECT,INSERT ON test.* TO user1@'127.0.0.1'") + }) + // Test with Network interface connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + // NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1) + cli.checkRows(c, rows, "user1@127.0.0.1") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'127.0.0.1'\nGRANT Select,Insert ON test.* TO 'user1'@'127.0.0.1'") + }) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT Select ON test.* TO 'user1'@'%'") + }) + + // Setup user1@localhost for socket (and if MySQL compatible; loop back network interface access) + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "root" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "root@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@localhost") + dbt.mustQuery("GRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO user1@localhost") + }) + // Test with Network interface connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + // NOTICE: this is not compatible with MySQL! 
(MySQL would report user1@localhost also for 127.0.0.1) + cli.checkRows(c, rows, "user1@127.0.0.1") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'127.0.0.1'\nGRANT Select,Insert ON test.* TO 'user1'@'127.0.0.1'") + }) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT Select,Insert,Update,Delete ON test.* TO 'user1'@'localhost'") + }) + +} + +// TestOnlySocket for server configuration without network interface for mysql clients +func (ts *tidbTestSuite) TestOnlySocket(c *C) { + osTempDir := os.TempDir() + tempDir, err := ioutil.TempDir(osTempDir, "tidb-test.*.socket") + c.Assert(err, IsNil) + socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK + defer os.RemoveAll(tempDir) + cli := newTestServerClient() + cfg := newTestConfig() + cfg.Socket = socketFile + cfg.Host = "" // No network interface listening for mysql traffic + cfg.Status.ReportStatus = false + + server, err := NewServer(cfg, ts.tidbdrv) + c.Assert(err, IsNil) + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() + time.Sleep(time.Millisecond * 100) + defer server.Close() + c.Assert(server.listener, IsNil) + c.Assert(server.socket, NotNil) + + // Test with Socket connection + Setup user1@% for all host access + defer func() { + cli.runTests(c, func(config *mysql.Config) { + config.User = "root" + config.Net = "unix" + config.Addr = socketFile + }, + func(dbt *DBTest) { + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'%'") + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'localhost'") + dbt.mustQuery("DROP USER IF EXISTS 'user1'@'127.0.0.1'") + }) + }() + cli.runTests(c, func(config *mysql.Config) { + config.User = "root" + config.Net = "unix" + config.Addr = socketFile + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "root@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@'%'") + dbt.mustQuery("GRANT SELECT ON test.* TO user1@'%'") + }) + // Test with Network interface connection with all hosts, should fail since server not configured + _, err = sql.Open("mysql", cli.getDSN(func(config *mysql.Config) { + config.User = "root" + config.DBName = "test" + config.Addr = "127.0.0.1" + })) + c.Assert(err, IsNil, Commentf("Connect succeeded when not configured!?!")) + _, err = sql.Open("mysql", cli.getDSN(func(config *mysql.Config) { + config.User = "user1" + config.DBName = "test" + config.Addr = "127.0.0.1" + })) + c.Assert(err, IsNil, Commentf("Connect succeeded when not configured!?!")) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT Select ON test.* TO 'user1'@'%'") + }) + + // Setup 
user1@127.0.0.1 for loop back network interface access + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "root" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + // NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1) + cli.checkRows(c, rows, "root@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@127.0.0.1") + dbt.mustQuery("GRANT SELECT,INSERT ON test.* TO user1@'127.0.0.1'") + }) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT Select ON test.* TO 'user1'@'%'") + }) + + // Setup user1@localhost for socket (and if MySQL compatible; loop back network interface access) + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "root" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "root@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION") + dbt.mustQuery("CREATE USER user1@localhost") + dbt.mustQuery("GRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO user1@localhost") + }) + // Test with unix domain socket file connection with all hosts + cli.runTests(c, func(config *mysql.Config) { + config.Net = "unix" + config.Addr = socketFile + config.User = "user1" + config.DBName = "test" + }, + func(dbt *DBTest) { + rows := dbt.mustQuery("select user()") + cli.checkRows(c, rows, "user1@localhost") + rows = dbt.mustQuery("show grants") + cli.checkRows(c, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT Select,Insert,Update,Delete ON test.* TO 'user1'@'localhost'") + }) + +} + // generateCert generates a private key and a certificate in PEM format based on parameters. // If parentCert and parentCertKey is specified, the new certificate will be signed by the parentCert. // Otherwise, the new certificate will be self-signed and is a CA. diff --git a/session/session.go b/session/session.go index b88f1d6c686d1..3b0465e6a24ae 100644 --- a/session/session.go +++ b/session/session.go @@ -479,9 +479,10 @@ func (s *session) doCommit(ctx context.Context) error { } // mockCommitError and mockGetTSErrorInRetry use to test PR #8743. failpoint.Inject("mockCommitError", func(val failpoint.Value) { - if val.(bool) && tikv.IsMockCommitErrorEnable() { - tikv.MockCommitErrorDisable() - failpoint.Return(kv.ErrTxnRetryable) + if val.(bool) { + if _, err := failpoint.Eval("tikvclient/mockCommitErrorOpt"); err == nil { + failpoint.Return(kv.ErrTxnRetryable) + } } }) diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 563a0d3ffca3e..b30a32e93bb39 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -86,6 +86,7 @@ type StatementContext struct { IgnoreNoPartition bool OptimDependOnMutableConst bool IgnoreExplainIDSuffix bool + IsStaleness bool // mu struct holds variables that change during execution. 
mu struct { diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 4ede5967f5932..0889812b48762 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -853,6 +853,9 @@ type SessionVars struct { // EnableGlobalTemporaryTable indicates whether to enable global temporary table EnableGlobalTemporaryTable bool + // EnableStableResultMode indicates whether to stabilize query results. + EnableStableResultMode bool + // LocalTemporaryTables is *infoschema.LocalTemporaryTables, use interface to avoid circle dependency. // It's nil if there is no local temporary table. LocalTemporaryTables interface{} diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index aff18a1623c02..14f31472a557f 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1757,6 +1757,10 @@ var defaultSysVars = []*SysVar{ }}, {Scope: ScopeGlobal, Name: SkipNameResolve, Value: Off, Type: TypeBool}, {Scope: ScopeGlobal, Name: DefaultAuthPlugin, Value: mysql.AuthNativePassword, Type: TypeEnum, PossibleValues: []string{mysql.AuthNativePassword, mysql.AuthCachingSha2Password}}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStableResultMode, Value: BoolToOnOff(DefTiDBEnableStableResultMode), Hidden: true, Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + s.EnableStableResultMode = TiDBOptOn(val) + return nil + }}, } // FeedbackProbability points to the FeedbackProbability in statistics package. diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 66c7ae9c18380..8af53a3a813a5 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -564,6 +564,9 @@ const ( TiDBEnableGlobalTemporaryTable = "tidb_enable_global_temporary_table" // TiDBEnableLocalTxn indicates whether to enable Local Txn. TiDBEnableLocalTxn = "tidb_enable_local_txn" + + // TiDBEnableStableResultMode indicates whether to stabilize query results. + TiDBEnableStableResultMode = "tidb_enable_stable_result_mode" ) // TiDB vars that have only global scope @@ -718,6 +721,7 @@ const ( DefTiDBEnableGlobalTemporaryTable = false DefTMPTableSize = 16777216 DefTiDBEnableLocalTxn = false + DefTiDBEnableStableResultMode = false ) // Process global variables.
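Reviewer note: the sysvar hunks above wire the new flag the way most boolean TiDB variables are wired: a hidden global/session SysVar whose SetSession hook copies the normalized value into SessionVars, with the default taken from DefTiDBEnableStableResultMode. The following is a minimal, self-contained sketch of that shape; the types sessionVars, sysVar and optOn are simplified stand-ins for illustration only, not the real sessionctx/variable API.

package main

import (
	"fmt"
	"strings"
)

// sessionVars stands in for variable.SessionVars; only the field relevant
// to this sketch is kept.
type sessionVars struct {
	EnableStableResultMode bool
}

// sysVar stands in for variable.SysVar: a name, a default value and an
// optional SetSession hook that mirrors the assigned value into session state.
type sysVar struct {
	Name       string
	Value      string
	Hidden     bool
	SetSession func(vars *sessionVars, val string) error
}

// optOn mimics the TiDBOptOn behaviour: "ON", "1" and "TRUE" enable the flag.
func optOn(val string) bool {
	switch strings.ToUpper(val) {
	case "ON", "1", "TRUE":
		return true
	}
	return false
}

func main() {
	vars := &sessionVars{}
	stableResult := sysVar{
		Name:   "tidb_enable_stable_result_mode",
		Value:  "OFF", // default is off, as in DefTiDBEnableStableResultMode
		Hidden: true,
		SetSession: func(v *sessionVars, val string) error {
			v.EnableStableResultMode = optOn(val)
			return nil
		},
	}

	// A statement such as `set tidb_enable_stable_result_mode = 1` ultimately
	// invokes the SetSession hook with the normalized value.
	if err := stableResult.SetSession(vars, "1"); err != nil {
		panic(err)
	}
	fmt.Println(vars.EnableStableResultMode) // true
}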
diff --git a/statistics/handle/update_test.go b/statistics/handle/update_test.go index d23bb51cca050..badb7eb95b860 100644 --- a/statistics/handle/update_test.go +++ b/statistics/handle/update_test.go @@ -583,6 +583,30 @@ func (s *testSerialStatsSuite) TestAutoAnalyzeOnEmptyTable(c *C) { c.Assert(s.do.StatsHandle().HandleAutoAnalyze(s.do.InfoSchema()), IsTrue) } +func (s *testSerialStatsSuite) TestIssue25700(c *C) { + defer cleanEnv(c, s.store, s.do) + tk := testkit.NewTestKit(c, s.store) + oriStart := tk.MustQuery("select @@tidb_auto_analyze_start_time").Rows()[0][0].(string) + oriEnd := tk.MustQuery("select @@tidb_auto_analyze_end_time").Rows()[0][0].(string) + defer func() { + tk.MustExec(fmt.Sprintf("set global tidb_auto_analyze_start_time='%v'", oriStart)) + tk.MustExec(fmt.Sprintf("set global tidb_auto_analyze_end_time='%v'", oriEnd)) + }() + tk.MustExec("set global tidb_auto_analyze_start_time='00:00 +0000'") + tk.MustExec("set global tidb_auto_analyze_end_time='23:59 +0000'") + + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE `t` ( `ldecimal` decimal(32,4) DEFAULT NULL, `rdecimal` decimal(32,4) DEFAULT NULL, `gen_col` decimal(36,4) GENERATED ALWAYS AS (`ldecimal` + `rdecimal`) VIRTUAL, `col_timestamp` timestamp(3) NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") + tk.MustExec("analyze table t") + tk.MustExec("INSERT INTO `t` (`ldecimal`, `rdecimal`, `col_timestamp`) VALUES (2265.2200, 9843.4100, '1999-12-31 16:00:00')" + strings.Repeat(", (2265.2200, 9843.4100, '1999-12-31 16:00:00')", int(handle.AutoAnalyzeMinCnt))) + c.Assert(s.do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll), IsNil) + c.Assert(s.do.StatsHandle().Update(s.do.InfoSchema()), IsNil) + + c.Assert(s.do.StatsHandle().HandleAutoAnalyze(s.do.InfoSchema()), IsTrue) + c.Assert(tk.MustQuery("show analyze status").Rows()[1][7], Equals, "finished") +} + func (s *testSerialStatsSuite) TestAutoAnalyzeOnChangeAnalyzeVer(c *C) { defer cleanEnv(c, s.store, s.do) tk := testkit.NewTestKit(c, s.store) diff --git a/types/json/binary_functions_test.go b/types/json/binary_functions_test.go index 12a9c8ece8ae2..cecda34ea468f 100644 --- a/types/json/binary_functions_test.go +++ b/types/json/binary_functions_test.go @@ -22,14 +22,13 @@ var _ = Suite(&testJSONFuncSuite{}) type testJSONFuncSuite struct{} -func (s *testJSONFuncSuite) TestdecodeEscapedUnicode(c *C) { +func (s *testJSONFuncSuite) TestDecodeEscapedUnicode(c *C) { c.Parallel() in := "597d" r, size, err := decodeEscapedUnicode([]byte(in)) c.Assert(string(r[:]), Equals, "好\x00") c.Assert(size, Equals, 3) c.Assert(err, IsNil) - } func BenchmarkDecodeEscapedUnicode(b *testing.B) { diff --git a/types/json/binary_test.go b/types/json/binary_test.go index bd375746a046c..0d1ce274e2f5a 100644 --- a/types/json/binary_test.go +++ b/types/json/binary_test.go @@ -112,24 +112,24 @@ func (s *testJSONSuite) TestBinaryJSONType(c *C) { func (s *testJSONSuite) TestBinaryJSONUnquote(c *C) { var tests = []struct { - j string + json string unquoted string + err error }{ - {j: `3`, unquoted: "3"}, - {j: `"3"`, unquoted: "3"}, - {j: `"[{\"x\":\"{\\\"y\\\":12}\"}]"`, unquoted: `[{"x":"{\"y\":12}"}]`}, - {j: `"hello, \"escaped quotes\" world"`, unquoted: "hello, \"escaped quotes\" world"}, - {j: "\"\\u4f60\"", unquoted: "你"}, - {j: `true`, unquoted: "true"}, - {j: `null`, unquoted: "null"}, - {j: `{"a": [1, 2]}`, unquoted: `{"a": [1, 2]}`}, - {j: `"\""`, unquoted: `"`}, - {j: `"'"`, unquoted: `'`}, - {j: `"''"`, unquoted: 
`''`}, - {j: `""`, unquoted: ``}, + {json: `3`, unquoted: "3", err: nil}, + {json: `"3"`, unquoted: "3", err: nil}, + {json: `"[{\"x\":\"{\\\"y\\\":12}\"}]"`, unquoted: `[{"x":"{\"y\":12}"}]`, err: nil}, + {json: `"hello, \"escaped quotes\" world"`, unquoted: "hello, \"escaped quotes\" world", err: nil}, + {json: "\"\\u4f60\"", unquoted: "你", err: nil}, + {json: `true`, unquoted: "true", err: nil}, + {json: `null`, unquoted: "null", err: nil}, + {json: `{"a": [1, 2]}`, unquoted: `{"a": [1, 2]}`, err: nil}, + {json: `"'"`, unquoted: `'`, err: nil}, + {json: `"''"`, unquoted: `''`, err: nil}, + {json: `""`, unquoted: ``, err: nil}, } for _, tt := range tests { - bj := mustParseBinaryFromString(c, tt.j) + bj := mustParseBinaryFromString(c, tt.json) unquoted, err := bj.Unquote() c.Assert(err, IsNil) c.Assert(unquoted, Equals, tt.unquoted)
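Reviewer note on the server.go changes earlier in this patch: Server.Run now starts one accept loop per configured listener (TCP and unix socket), each loop reporting on a shared error channel, and connections accepted on the socket are tagged with isUnixSocket so host-based checks no longer depend on a server-wide heuristic. A rough, self-contained sketch of that pattern follows; acceptLoop and run are illustrative stand-ins for startNetworkListener and Server.Run, not the server package's actual code.

package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

// acceptLoop mirrors the shape of the per-listener loop: it tags every
// accepted connection with whether it arrived over the unix socket and
// signals the shared error channel when the loop ends.
func acceptLoop(ln net.Listener, isUnixSocket bool, errChan chan<- error) {
	if ln == nil {
		errChan <- nil // this transport is not configured; report "done"
		return
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			errChan <- err // closed listener or a real failure; caller decides
			return
		}
		go func(c net.Conn) {
			defer c.Close()
			// A real server builds a per-connection handler here and records
			// isUnixSocket on it for host-based authentication.
			fmt.Fprintf(c, "connected via unix socket: %v\n", isUnixSocket)
		}(conn)
	}
}

func run(tcpLn, unixLn net.Listener) error {
	errChan := make(chan error)
	go acceptLoop(tcpLn, false, errChan)
	go acceptLoop(unixLn, true, errChan)
	// Wait for both loops; the first non-nil error wins.
	if err := <-errChan; err != nil {
		return err
	}
	return <-errChan
}

func main() {
	tcpLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	sock := filepath.Join(os.TempDir(), "listener-sketch.sock")
	_ = os.Remove(sock)
	unixLn, err := net.Listen("unix", sock)
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", tcpLn.Addr(), "and", sock)
	if err := run(tcpLn, unixLn); err != nil {
		fmt.Println("server stopped:", err)
	}
}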