diff --git a/config/config.toml.example b/config/config.toml.example
index fa43bc04ef3d5..a8e8a15abd23e 100644
--- a/config/config.toml.example
+++ b/config/config.toml.example
@@ -274,7 +274,7 @@ enable = false
 # WriteTimeout specifies how long it will wait for writing binlog to pump.
 write-timeout = "15s"
 
-# If IgnoreError is true, when writting binlog meets error, TiDB would stop writting binlog,
+# If IgnoreError is true, when writing binlog meets an error, TiDB would stop writing binlog,
 # but still provide service.
 ignore-error = false
 
diff --git a/ddl/db_test.go b/ddl/db_test.go
index a26f82a7ae972..d7e00e1381491 100644
--- a/ddl/db_test.go
+++ b/ddl/db_test.go
@@ -2671,3 +2671,40 @@ func (s *testDBSuite) TestModifyColumnCharset(c *C) {
 		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
 }
+
+func (s *testDBSuite) TestAlterShardRowIDBits(c *C) {
+	s.tk = testkit.NewTestKit(c, s.store)
+	tk := s.tk
+
+	tk.MustExec("use test")
+	// Test altering shard_row_id_bits.
+	tk.MustExec("drop table if exists t1")
+	defer tk.MustExec("drop table if exists t1")
+	tk.MustExec("create table t1 (a int) shard_row_id_bits = 5")
+	tk.MustExec(fmt.Sprintf("alter table t1 auto_increment = %d;", 1<<56))
+	tk.MustExec("insert into t1 set a=1;")
+
+	// Test that increasing shard_row_id_bits fails when it would overflow the global auto ID.
+	_, err := tk.Exec("alter table t1 SHARD_ROW_ID_BITS = 10;")
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "[autoid:1467]shard_row_id_bits 10 will cause next global auto ID overflow")
+
+	// Test that reducing shard_row_id_bits succeeds.
+	tk.MustExec("alter table t1 SHARD_ROW_ID_BITS = 3;")
+	checkShardRowID := func(maxShardRowIDBits, shardRowIDBits uint64) {
+		tbl := testGetTableByName(c, tk.Se, "test", "t1")
+		c.Assert(tbl.Meta().MaxShardRowIDBits == maxShardRowIDBits, IsTrue)
+		c.Assert(tbl.Meta().ShardRowIDBits == shardRowIDBits, IsTrue)
+	}
+	checkShardRowID(5, 3)
+
+	// Test that after reducing shard_row_id_bits, the overflow check still uses the max recorded shard_row_id_bits.
+ tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int) shard_row_id_bits = 10") + tk.MustExec("alter table t1 SHARD_ROW_ID_BITS = 5;") + checkShardRowID(10, 5) + tk.MustExec(fmt.Sprintf("alter table t1 auto_increment = %d;", 1<<56)) + _, err = tk.Exec("insert into t1 set a=1;") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[autoid:1467]Failed to read auto-increment value from storage engine") +} diff --git a/ddl/ddl.go b/ddl/ddl.go index c21ac119c4def..e2ca983d82902 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -242,7 +242,7 @@ type DDL interface { CreateView(ctx sessionctx.Context, stmt *ast.CreateViewStmt) error CreateTableWithLike(ctx sessionctx.Context, ident, referIdent ast.Ident, ifNotExists bool) error DropTable(ctx sessionctx.Context, tableIdent ast.Ident) (err error) - RestoreTable(ctx sessionctx.Context, tbInfo *model.TableInfo, schemaID, autoID, dropJobID int64, snapshotTS uint64) (err error) + RecoverTable(ctx sessionctx.Context, tbInfo *model.TableInfo, schemaID, autoID, dropJobID int64, snapshotTS uint64) (err error) DropView(ctx sessionctx.Context, tableIdent ast.Ident) (err error) CreateIndex(ctx sessionctx.Context, tableIdent ast.Ident, unique bool, indexName model.CIStr, columnNames []*ast.IndexColName, indexOption *ast.IndexOption) error diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index faf2953f56ed2..c7c79bf614136 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1207,7 +1207,7 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e return errors.Trace(err) } -func (d *ddl) RestoreTable(ctx sessionctx.Context, tbInfo *model.TableInfo, schemaID, autoID, dropJobID int64, snapshotTS uint64) (err error) { +func (d *ddl) RecoverTable(ctx sessionctx.Context, tbInfo *model.TableInfo, schemaID, autoID, dropJobID int64, snapshotTS uint64) (err error) { is := d.GetInfoSchemaWithInterceptor(ctx) // Check schema exist. schema, ok := is.SchemaByID(schemaID) @@ -1225,9 +1225,9 @@ func (d *ddl) RestoreTable(ctx sessionctx.Context, tbInfo *model.TableInfo, sche job := &model.Job{ SchemaID: schemaID, TableID: tbInfo.ID, - Type: model.ActionRestoreTable, + Type: model.ActionRecoverTable, BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{tbInfo, autoID, dropJobID, snapshotTS, restoreTableCheckFlagNone}, + Args: []interface{}{tbInfo, autoID, dropJobID, snapshotTS, recoverTableCheckFlagNone}, } err = d.doDDLJob(ctx, job) err = d.callHookOnChanged(err) @@ -1563,6 +1563,7 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err if tbInfo.ShardRowIDBits > shardRowIDBitsMax { tbInfo.ShardRowIDBits = shardRowIDBitsMax } + tbInfo.MaxShardRowIDBits = tbInfo.ShardRowIDBits } } @@ -1804,6 +1805,14 @@ func (d *ddl) ShardRowID(ctx sessionctx.Context, tableIdent ast.Ident, uVal uint if ok && uVal != 0 { return errUnsupportedShardRowIDBits } + if uVal == t.Meta().ShardRowIDBits { + // Nothing need to do. 
+ return nil + } + err = verifyNoOverflowShardBits(d.sessPool, t, uVal) + if err != nil { + return err + } job := &model.Job{ Type: model.ActionShardRowID, SchemaID: schema.ID, diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 7292c42a0da28..62ed299cc45b1 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -285,8 +285,8 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { } } switch job.Type { - case model.ActionRestoreTable: - err = finishRestoreTable(w, t, job) + case model.ActionRecoverTable: + err = finishRecoverTable(w, t, job) } if err != nil { return errors.Trace(err) @@ -303,15 +303,15 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { return errors.Trace(err) } -func finishRestoreTable(w *worker, t *meta.Meta, job *model.Job) error { +func finishRecoverTable(w *worker, t *meta.Meta, job *model.Job) error { tbInfo := &model.TableInfo{} - var autoID, dropJobID, restoreTableCheckFlag int64 + var autoID, dropJobID, recoverTableCheckFlag int64 var snapshotTS uint64 - err := job.DecodeArgs(tbInfo, &autoID, &dropJobID, &snapshotTS, &restoreTableCheckFlag) + err := job.DecodeArgs(tbInfo, &autoID, &dropJobID, &snapshotTS, &recoverTableCheckFlag) if err != nil { return errors.Trace(err) } - if restoreTableCheckFlag == restoreTableCheckFlagEnableGC { + if recoverTableCheckFlag == recoverTableCheckFlagEnableGC { err = enableGC(w) if err != nil { return errors.Trace(err) @@ -525,15 +525,15 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, case model.ActionRenameTable: ver, err = onRenameTable(t, job) case model.ActionShardRowID: - ver, err = onShardRowID(t, job) + ver, err = w.onShardRowID(d, t, job) case model.ActionModifyTableComment: ver, err = onModifyTableComment(t, job) case model.ActionAddTablePartition: ver, err = onAddTablePartition(t, job) case model.ActionModifyTableCharsetAndCollate: ver, err = onModifyTableCharsetAndCollate(t, job) - case model.ActionRestoreTable: - ver, err = w.onRestoreTable(d, t, job) + case model.ActionRecoverTable: + ver, err = w.onRecoverTable(d, t, job) default: // Invalid job, cancel it. 
 		job.State = model.JobStateCancelled
diff --git a/ddl/serial_test.go b/ddl/serial_test.go
index 0dbc9c2b20a00..1b1a50313b357 100644
--- a/ddl/serial_test.go
+++ b/ddl/serial_test.go
@@ -128,10 +128,10 @@ func (s *testSerialSuite) TestCancelAddIndexPanic(c *C) {
 	c.Assert(err.Error(), Equals, "[ddl:12]cancelled DDL job")
 }
 
-func (s *testSerialSuite) TestRestoreTableByJobID(c *C) {
+func (s *testSerialSuite) TestRecoverTableByJobID(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
-	tk.MustExec("create database if not exists test_restore")
-	tk.MustExec("use test_restore")
+	tk.MustExec("create database if not exists test_recover")
+	tk.MustExec("use test_recover")
 	tk.MustExec("drop table if exists t_recover")
 	tk.MustExec("create table t_recover (a int);")
 	defer func(originGC bool) {
@@ -162,19 +162,19 @@ func (s *testSerialSuite) TestRestoreTableByJobID(c *C) {
 	rows, err := session.GetRows4Test(context.Background(), tk.Se, rs)
 	c.Assert(err, IsNil)
 	row := rows[0]
-	c.Assert(row.GetString(1), Equals, "test_restore")
+	c.Assert(row.GetString(1), Equals, "test_recover")
 	c.Assert(row.GetString(3), Equals, "drop table")
 	jobID := row.GetInt64(0)
 
 	// if GC safe point is not exists in mysql.tidb
-	_, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", jobID))
+	_, err = tk.Exec(fmt.Sprintf("recover table by job %d", jobID))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
 	// set GC safe point
 	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
 
 	// if GC enable is not exists in mysql.tidb
-	_, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", jobID))
+	_, err = tk.Exec(fmt.Sprintf("recover table by job %d", jobID))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "[ddl:-1]can not get 'tikv_gc_enable'")
 
@@ -183,7 +183,7 @@
 	// recover job is before GC safe point
 	tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
-	_, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", jobID))
+	_, err = tk.Exec(fmt.Sprintf("recover table by job %d", jobID))
 	c.Assert(err, NotNil)
 	c.Assert(strings.Contains(err.Error(), "snapshot is older than GC safe point"), Equals, true)
 
@@ -191,14 +191,14 @@
 	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
 	// if there is a new table with the same name, should return failed.
 	tk.MustExec("create table t_recover (a int);")
-	_, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", jobID))
+	_, err = tk.Exec(fmt.Sprintf("recover table by job %d", jobID))
 	c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
 
-	// drop the new table with the same name, then restore table.
+	// drop the new table with the same name, then recover table.
 	tk.MustExec("drop table t_recover")
 
-	// do restore table.
-	tk.MustExec(fmt.Sprintf("admin restore table by job %d", jobID))
+	// do recover table.
+	tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
 
 	// check recover table meta and data record.
 	tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
@@ -206,8 +206,8 @@
 	tk.MustExec("insert into t_recover values (4),(5),(6)")
 	tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
 
-	// restore table by none exits job.
-	_, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", 10000000))
+	// recover table by a nonexistent job.
+ _, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000)) c.Assert(err, NotNil) // Disable GC by manual first, then after recover table, the GC enable status should also be disabled. @@ -221,11 +221,11 @@ func (s *testSerialSuite) TestRestoreTableByJobID(c *C) { rows, err = session.GetRows4Test(context.Background(), tk.Se, rs) c.Assert(err, IsNil) row = rows[0] - c.Assert(row.GetString(1), Equals, "test_restore") + c.Assert(row.GetString(1), Equals, "test_recover") c.Assert(row.GetString(3), Equals, "drop table") jobID = row.GetInt64(0) - tk.MustExec(fmt.Sprintf("admin restore table by job %d", jobID)) + tk.MustExec(fmt.Sprintf("recover table by job %d", jobID)) // check recover table meta and data record. tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1")) @@ -238,10 +238,10 @@ func (s *testSerialSuite) TestRestoreTableByJobID(c *C) { c.Assert(gcEnable, Equals, false) } -func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { +func (s *testSerialSuite) TestRecoverTableByTableName(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("create database if not exists test_restore") - tk.MustExec("use test_restore") + tk.MustExec("create database if not exists test_recover") + tk.MustExec("use test_recover") tk.MustExec("drop table if exists t_recover, t_recover2") tk.MustExec("create table t_recover (a int);") defer func(originGC bool) { @@ -268,14 +268,14 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { tk.MustExec("drop table t_recover") // if GC safe point is not exists in mysql.tidb - _, err := tk.Exec("admin restore table t_recover") + _, err := tk.Exec("recover table t_recover") c.Assert(err, NotNil) c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'") // set GC safe point tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // if GC enable is not exists in mysql.tidb - _, err = tk.Exec("admin restore table t_recover") + _, err = tk.Exec("recover table t_recover") c.Assert(err, NotNil) c.Assert(err.Error(), Equals, "[ddl:-1]can not get 'tikv_gc_enable'") @@ -284,7 +284,7 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { // recover job is before GC safe point tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop)) - _, err = tk.Exec("admin restore table t_recover") + _, err = tk.Exec("recover table t_recover") c.Assert(err, NotNil) c.Assert(strings.Contains(err.Error(), "snapshot is older than GC safe point"), Equals, true) @@ -292,14 +292,14 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // if there is a new table with the same name, should return failed. tk.MustExec("create table t_recover (a int);") - _, err = tk.Exec("admin restore table t_recover") + _, err = tk.Exec("recover table t_recover") c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error()) - // drop the new table with the same name, then restore table. + // drop the new table with the same name, then recover table. tk.MustExec("rename table t_recover to t_recover2") - // do restore table. - tk.MustExec("admin restore table t_recover") + // do recover table. + tk.MustExec("recover table t_recover") // check recover table meta and data record. tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3")) @@ -309,8 +309,8 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { // check rebase auto id. 
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003")) - // restore table by none exits job. - _, err = tk.Exec(fmt.Sprintf("admin restore table by job %d", 10000000)) + // recover table by none exits job. + _, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000)) c.Assert(err, NotNil) // Disable GC by manual first, then after recover table, the GC enable status should also be disabled. @@ -320,7 +320,7 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { tk.MustExec("delete from t_recover where a > 1") tk.MustExec("drop table t_recover") - tk.MustExec("admin restore table t_recover") + tk.MustExec("recover table t_recover") // check recover table meta and data record. tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1")) @@ -333,10 +333,10 @@ func (s *testSerialSuite) TestRestoreTableByTableName(c *C) { c.Assert(gcEnable, Equals, false) } -func (s *testSerialSuite) TestRestoreTableByJobIDFail(c *C) { +func (s *testSerialSuite) TestRecoverTableByJobIDFail(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("create database if not exists test_restore") - tk.MustExec("use test_restore") + tk.MustExec("create database if not exists test_recover") + tk.MustExec("use test_recover") tk.MustExec("drop table if exists t_recover") tk.MustExec("create table t_recover (a int);") defer func(originGC bool) { @@ -364,7 +364,7 @@ func (s *testSerialSuite) TestRestoreTableByJobIDFail(c *C) { rows, err := session.GetRows4Test(context.Background(), tk.Se, rs) c.Assert(err, IsNil) row := rows[0] - c.Assert(row.GetString(1), Equals, "test_restore") + c.Assert(row.GetString(1), Equals, "test_recover") c.Assert(row.GetString(3), Equals, "drop table") jobID := row.GetInt64(0) @@ -376,21 +376,21 @@ func (s *testSerialSuite) TestRestoreTableByJobIDFail(c *C) { // set hook hook := &ddl.TestDDLCallback{} hook.OnJobRunBeforeExported = func(job *model.Job) { - if job.Type == model.ActionRestoreTable { + if job.Type == model.ActionRecoverTable { gofail.Enable("github.com/pingcap/tidb/store/tikv/mockCommitError", `return(true)`) - gofail.Enable("github.com/pingcap/tidb/ddl/mockRestoreTableCommitErr", `return(true)`) + gofail.Enable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr", `return(true)`) } } origHook := s.dom.DDL().GetHook() defer s.dom.DDL().(ddl.DDLForTest).SetHook(origHook) s.dom.DDL().(ddl.DDLForTest).SetHook(hook) - // do restore table. - tk.MustExec(fmt.Sprintf("admin restore table by job %d", jobID)) + // do recover table. + tk.MustExec(fmt.Sprintf("recover table by job %d", jobID)) gofail.Disable("github.com/pingcap/tidb/store/tikv/mockCommitError") - gofail.Disable("github.com/pingcap/tidb/ddl/mockRestoreTableCommitErr") + gofail.Disable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr") - // make sure enable GC after restore table. + // make sure enable GC after recover table. 
 	enable, err := gcutil.CheckGCEnable(tk.Se)
 	c.Assert(err, IsNil)
 	c.Assert(enable, Equals, true)
@@ -402,10 +402,10 @@
 	tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
 }
 
-func (s *testSerialSuite) TestRestoreTableByTableNameFail(c *C) {
+func (s *testSerialSuite) TestRecoverTableByTableNameFail(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
-	tk.MustExec("create database if not exists test_restore")
-	tk.MustExec("use test_restore")
+	tk.MustExec("create database if not exists test_recover")
+	tk.MustExec("use test_recover")
 	tk.MustExec("drop table if exists t_recover")
 	tk.MustExec("create table t_recover (a int);")
 	defer func(originGC bool) {
@@ -436,21 +436,21 @@
 	// set hook
 	hook := &ddl.TestDDLCallback{}
 	hook.OnJobRunBeforeExported = func(job *model.Job) {
-		if job.Type == model.ActionRestoreTable {
+		if job.Type == model.ActionRecoverTable {
 			gofail.Enable("github.com/pingcap/tidb/store/tikv/mockCommitError", `return(true)`)
-			gofail.Enable("github.com/pingcap/tidb/ddl/mockRestoreTableCommitErr", `return(true)`)
+			gofail.Enable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr", `return(true)`)
 		}
 	}
 	origHook := s.dom.DDL().GetHook()
 	defer s.dom.DDL().(ddl.DDLForTest).SetHook(origHook)
 	s.dom.DDL().(ddl.DDLForTest).SetHook(hook)
 
-	// do restore table.
-	tk.MustExec("admin restore table t_recover")
+	// do recover table.
+	tk.MustExec("recover table t_recover")
 	gofail.Disable("github.com/pingcap/tidb/store/tikv/mockCommitError")
-	gofail.Disable("github.com/pingcap/tidb/ddl/mockRestoreTableCommitErr")
+	gofail.Disable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr")
 
-	// make sure enable GC after restore table.
+	// make sure GC is enabled again after recover table.
 	enable, err := gcutil.CheckGCEnable(tk.Se)
 	c.Assert(err, IsNil)
 	c.Assert(enable, Equals, true)
diff --git a/ddl/table.go b/ddl/table.go
index 253d2482af2ae..7cb63bf6e6915 100644
--- a/ddl/table.go
+++ b/ddl/table.go
@@ -28,6 +28,7 @@ import (
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/meta/autoid"
 	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/table/tables"
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/util/gcutil"
 	"github.com/pingcap/tidb/util/logutil"
@@ -167,17 +168,17 @@ func onDropTableOrView(t *meta.Meta, job *model.Job) (ver int64, _ error) {
 }
 
 const (
-	restoreTableCheckFlagNone int64 = iota
-	restoreTableCheckFlagEnableGC
-	restoreTableCheckFlagDisableGC
+	recoverTableCheckFlagNone int64 = iota
+	recoverTableCheckFlagEnableGC
+	recoverTableCheckFlagDisableGC
 )
 
-func (w *worker) onRestoreTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
+func (w *worker) onRecoverTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
 	schemaID := job.SchemaID
 	tblInfo := &model.TableInfo{}
-	var autoID, dropJobID, restoreTableCheckFlag int64
+	var autoID, dropJobID, recoverTableCheckFlag int64
 	var snapshotTS uint64
-	if err = job.DecodeArgs(tblInfo, &autoID, &dropJobID, &snapshotTS, &restoreTableCheckFlag); err != nil {
+	if err = job.DecodeArgs(tblInfo, &autoID, &dropJobID, &snapshotTS, &recoverTableCheckFlag); err != nil {
 		// Invalid arguments, cancel this job.
 		job.State = model.JobStateCancelled
 		return ver, errors.Trace(err)
 	}
@@ -195,19 +196,19 @@ func (w *worker) onRestoreTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in
 		return ver, errors.Trace(err)
 	}
 
-	// Restore table divide into 2 steps:
+	// Recovering a table is divided into 2 steps:
-	// 1. Check GC enable status, to decided whether enable GC after restore table.
+	// 1. Check the GC enable status, to decide whether to enable GC after recovering the table.
 	//     a. Why not disable GC before put the job to DDL job queue?
-	//        Think about concurrency problem. If a restore job-1 is doing and already disabled GC,
-	//        then, another restore table job-2 check GC enable will get disable before into the job queue.
-	//        then, after restore table job-2 finished, the GC will be disabled.
-	//     b. Why split into 2 steps? 1 step also can finish this job: check GC -> disable GC -> restore table -> finish job.
+	//        Think about the concurrency problem: if recover job-1 is running and has already disabled GC,
+	//        then another recover-table job-2 that checks the GC status before entering the job queue will see GC disabled,
+	//        so after recover-table job-2 finishes, GC will remain disabled.
+	//     b. Why split into 2 steps? One step could also finish this job: check GC -> disable GC -> recover table -> finish job.
 	//        What if the transaction commit failed? then, the job will retry, but the GC already disabled when first running.
 	//        So, after this job retry succeed, the GC will be disabled.
-	// 2. Do restore table job.
+	// 2. Do the recover table job.
 	//     a. Check whether GC enabled, if enabled, disable GC first.
-	//     b. Check GC safe point. If drop table time if after safe point time, then can do restore.
-	//        otherwise, can't restore table, because the records of the table may already delete by gc.
+	//     b. Check the GC safe point. If the drop-table time is after the safe-point time, the table can be recovered;
+	//        otherwise the table can't be recovered, because its records may have already been deleted by GC.
 	//     c. Remove GC task of the table from gc_delete_range table.
 	//     d. Create table and rebase table auto ID.
 	//     e. Finish.
@@ -216,9 +217,9 @@
 		// none -> write only
 		// check GC enable and update flag.
 		if gcEnable {
-			job.Args[len(job.Args)-1] = restoreTableCheckFlagEnableGC
+			job.Args[len(job.Args)-1] = recoverTableCheckFlagEnableGC
 		} else {
-			job.Args[len(job.Args)-1] = restoreTableCheckFlagDisableGC
+			job.Args[len(job.Args)-1] = recoverTableCheckFlagDisableGC
 		}
 
 		job.SchemaState = model.StateWriteOnly
@@ -229,7 +230,7 @@
 		}
 	case model.StateWriteOnly:
 		// write only -> public
-		// do restore table.
+		// do recover table.
 		if gcEnable {
 			err = disableGC(w)
 			if err != nil {
@@ -256,9 +257,9 @@
 			return ver, errors.Trace(err)
 		}
 
-		// gofail: var mockRestoreTableCommitErr bool
-		// if mockRestoreTableCommitErr && mockRestoreTableCommitErrOnce {
-		//	mockRestoreTableCommitErrOnce = false
+		// gofail: var mockRecoverTableCommitErr bool
+		// if mockRecoverTableCommitErr && mockRecoverTableCommitErrOnce {
+		//	mockRecoverTableCommitErrOnce = false
 		//	kv.MockCommitErrorEnable()
 		// }
 
@@ -270,13 +271,13 @@
 		// Finish this job.
 		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
 	default:
-		return ver, ErrInvalidTableState.GenWithStack("invalid restore table state %v", tblInfo.State)
+		return ver, ErrInvalidTableState.GenWithStack("invalid recover table state %v", tblInfo.State)
 	}
 	return ver, nil
 }
 
-// mockRestoreTableCommitErrOnce uses to make sure `mockRestoreTableCommitErr` only mock error once.
-var mockRestoreTableCommitErrOnce = true
+// mockRecoverTableCommitErrOnce is used to make sure `mockRecoverTableCommitErr` only mocks the error once.
+var mockRecoverTableCommitErrOnce = true
 
 func enableGC(w *worker) error {
 	ctx, err := w.sessPool.get()
@@ -469,7 +470,7 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job) (ver int64,
 	return ver, nil
 }
 
-func onShardRowID(t *meta.Meta, job *model.Job) (ver int64, _ error) {
+func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
 	var shardRowIDBits uint64
 	err := job.DecodeArgs(&shardRowIDBits)
 	if err != nil {
@@ -481,7 +482,22 @@
 		job.State = model.JobStateCancelled
 		return ver, errors.Trace(err)
 	}
-	tblInfo.ShardRowIDBits = shardRowIDBits
+	if shardRowIDBits < tblInfo.ShardRowIDBits {
+		tblInfo.ShardRowIDBits = shardRowIDBits
+	} else {
+		tbl, err := getTable(d.store, job.SchemaID, tblInfo)
+		if err != nil {
+			return ver, errors.Trace(err)
+		}
+		err = verifyNoOverflowShardBits(w.sessPool, tbl, shardRowIDBits)
+		if err != nil {
+			job.State = model.JobStateCancelled
+			return ver, err
+		}
+		tblInfo.ShardRowIDBits = shardRowIDBits
+		// MaxShardRowIDBits is used to check the overflow of auto IDs.
+		tblInfo.MaxShardRowIDBits = shardRowIDBits
+	}
 	ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
 	if err != nil {
 		job.State = model.JobStateCancelled
@@ -491,6 +507,24 @@
 	return ver, nil
 }
 
+func verifyNoOverflowShardBits(s *sessionPool, tbl table.Table, shardRowIDBits uint64) error {
+	ctx, err := s.get()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	defer s.put(ctx)
+
+	// Check next global max auto ID first.
+	autoIncID, err := tbl.Allocator(ctx).NextGlobalAutoID(tbl.Meta().ID)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if tables.OverflowShardBits(autoIncID, shardRowIDBits) {
+		return autoid.ErrAutoincReadFailed.GenWithStack("shard_row_id_bits %d will cause next global auto ID overflow", shardRowIDBits)
+	}
+	return nil
+}
+
 func onRenameTable(t *meta.Meta, job *model.Job) (ver int64, _ error) {
 	var oldSchemaID int64
 	var tableName model.CIStr
diff --git a/docs/design/2018-07-22-enhance-propagations.md b/docs/design/2018-07-22-enhance-propagations.md
index ab5dc46a5810b..2209c4735230c 100644
--- a/docs/design/2018-07-22-enhance-propagations.md
+++ b/docs/design/2018-07-22-enhance-propagations.md
@@ -153,7 +153,7 @@ Constraint propagation is commonly used as logical plan optimization in traditio
 
  * For the columnar storage format to be supported in the future, we may apply some filters directly when accessing the raw storage.
 
- * Reduce the data transfered from TiKV to TiDB.
+ * Reduce the data transferred from TiKV to TiDB.
### Disadvantages:
 
diff --git a/docs/design/2018-09-10-adding-tz-env.md b/docs/design/2018-09-10-adding-tz-env.md
index b95cfef472f6c..3efd2fe26344f 100644
--- a/docs/design/2018-09-10-adding-tz-env.md
+++ b/docs/design/2018-09-10-adding-tz-env.md
@@ -6,13 +6,13 @@
 
 ## Abstract
 
-When it comes to time-related calculation, it is hard for the distributed system. This proposal tries to resolve two problems: 1. timezone may be inconsistent across multiple `TiDB` instances, 2. performance degradation casued by pushing `System` down to `TiKV`. The impact of this proposal is changing the way of `TiDB` inferring system's timezone name. Before this proposal, the default timezone name pushed down to TiKV is `System` when session's timezone is not set. After this, TiDB evaluates system's timezone name via `TZ` environment variable and the path of the soft link of `/etc/localtime`. If both of them are failed, `TiDB` then push `UTC` to `TiKV`.
+Time-related calculation is hard in a distributed system. This proposal tries to resolve two problems: 1. the timezone may be inconsistent across multiple `TiDB` instances, 2. performance degradation caused by pushing `System` down to `TiKV`. The impact of this proposal is changing the way `TiDB` infers the system timezone name. Before this proposal, the default timezone name pushed down to TiKV is `System` when the session timezone is not set. After this, TiDB evaluates the system timezone name via the `TZ` environment variable and the path of the soft link of `/etc/localtime`. If both of them fail, `TiDB` then pushes `UTC` down to `TiKV`.
 
 ## Background
 
 After we solved the daylight saving time issue, we found the performance degradation of TiKV side. Thanks for the investigation done by engineers from TiKV. The root cause of such performance degradation is that TiKV infers `System` timezone name via a third party lib, which calls a syscall and costs a lot. In our internal benchmark system, after [this PR](https://github.com/pingcap/tidb/pull/6823), our codebase is 1000 times slower than before. We have to address this.
 
-Another problem needs also to be addressed is the potentially incosistent timezone name across multiple `TiDB` instances. `TiDB` instances may reside at different timezone which could cause incorrect calculation when it comes to time-related calculation. Just getting `TiDB`'s sytem timezone could be broken. We need find a way to ensure the uniqueness of global timezone name across multiple `TiDB`'s timezone name and also to leverage to resolve the performance degradation.
+Another problem that also needs to be addressed is the potentially inconsistent timezone name across multiple `TiDB` instances. `TiDB` instances may reside in different timezones, which could cause incorrect time-related calculation. Just getting `TiDB`'s system timezone could be broken. We need to find a way to ensure the uniqueness of the global timezone name across multiple `TiDB` instances, and also to leverage it to resolve the performance degradation.
 
 ## Proposal
 
@@ -28,7 +28,7 @@ In our case, which means both `TiDB` and `TiKV`, we need care the first and thir
 
 In this proposal, we suggest setting `TZ` to a valid IANA timezone name which can be read from `TiDB` later. If `TiDB` can't get `TZ` or the supply of `TZ` is invalid, `TiDB` just falls back to evaluate the path of the soft link of `/etc/localtime`. In addition, a warning message telling the user you should set `TZ` properly will be printed.
 Setting `TZ` can be done in our `tidb-ansible` project, it is also can be done at user side by `export TZ="Asia/Shanghai"`. If both of them are failed, `TiDB` will use `UTC` as timezone name.
 
-The positive side of this change is resolving performance degradation issue and ensuring the uniqueness of gloabl timezone name in multiple `TiDB` instances.
+The positive side of this change is resolving the performance degradation issue and ensuring the uniqueness of the global timezone name across multiple `TiDB` instances.
 
 The negative side is just adding a config item which is a very small matter and the user probably does not care it if we can take care of it and more importantly guarantee the correctness.
 
@@ -46,9 +46,9 @@ The upgrading process need to be handled in particular. `TZ` environment variabl
 
 ## Implementation
 
-The implementation is relatively easy. We just get `TZ` environment from system and check whether it is valid or not. If it is invalid, TiDB evaluates the path of soft link of `/etc/localtime`. In addition, a warning message needs to be printed indicating user has to set `TZ` variable properly. For example, if `/etc/localtime` links to `/usr/share/zoneinfo/Asia/Shanghai`, then timezone name `TiDB` gets should be `Asia/Shangahi`.
+The implementation is relatively easy. We just get the `TZ` environment variable from the system and check whether it is valid or not. If it is invalid, TiDB evaluates the path of the soft link of `/etc/localtime`. In addition, a warning message needs to be printed indicating the user has to set the `TZ` variable properly. For example, if `/etc/localtime` links to `/usr/share/zoneinfo/Asia/Shanghai`, then the timezone name `TiDB` gets should be `Asia/Shanghai`.
 
-In order to ensure the uniqueness of global timezone across multiple `TiDB` instances, we need to write timezone name into `variable_value` with variable name `system_tz` in `mysql.tidb`. This cached value can be read once `TiDB` finishs its bootstrap stage. A method `loadLocalStr` can do this job.
+In order to ensure the uniqueness of the global timezone across multiple `TiDB` instances, we need to write the timezone name into `variable_value` with variable name `system_tz` in `mysql.tidb`. This cached value can be read once `TiDB` finishes its bootstrap stage. A method `loadLocalStr` can do this job.
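As a rough illustration of this inference order, here is a minimal standalone sketch (a hypothetical helper, not the actual TiDB code) that prefers a valid `TZ`, falls back to the `/etc/localtime` soft link, and finally uses `UTC`:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// inferSystemTZ mirrors the inference order described above:
// prefer a valid TZ environment variable, then the /etc/localtime
// soft link, and finally fall back to UTC.
func inferSystemTZ() string {
	if tz := os.Getenv("TZ"); tz != "" {
		if _, err := time.LoadLocation(tz); err == nil {
			return tz
		}
		fmt.Fprintln(os.Stderr, "warning: invalid TZ, please set TZ to a valid IANA timezone name")
	}
	// /etc/localtime is usually a soft link such as
	// /usr/share/zoneinfo/Asia/Shanghai; recover the name from its target.
	if target, err := filepath.EvalSymlinks("/etc/localtime"); err == nil {
		if idx := strings.Index(target, "zoneinfo/"); idx >= 0 {
			name := target[idx+len("zoneinfo/"):]
			if _, err := time.LoadLocation(name); err == nil {
				return name
			}
		}
	}
	return "UTC"
}

func main() {
	fmt.Println(inferSystemTZ())
}
```

A name derived this way is a stable string such as `Asia/Shanghai`, which is what would be cached in `mysql.tidb` under `system_tz`.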
## Open issues (if applicable)
 
diff --git a/executor/builder.go b/executor/builder.go
index 31ac09a75c354..1960fac2096bb 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -92,8 +92,6 @@ func (b *executorBuilder) build(p plannercore.Plan) Executor {
 		return b.buildCheckIndexRange(v)
 	case *plannercore.ChecksumTable:
 		return b.buildChecksumTable(v)
-	case *plannercore.RestoreTable:
-		return b.buildRestoreTable(v)
 	case *plannercore.DDL:
 		return b.buildDDL(v)
 	case *plannercore.Deallocate:
@@ -357,16 +355,6 @@ func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) Executo
 	return e
 }
 
-func (b *executorBuilder) buildRestoreTable(v *plannercore.RestoreTable) Executor {
-	e := &RestoreTableExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()),
-		jobID:        v.JobID,
-		Table:        v.Table,
-		JobNum:       v.JobNum,
-	}
-	return e
-}
-
 func buildCleanupIndexCols(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) []*model.ColumnInfo {
 	columns := make([]*model.ColumnInfo, 0, len(indexInfo.Columns)+1)
 	for _, idxCol := range indexInfo.Columns {
diff --git a/executor/ddl.go b/executor/ddl.go
index d1e5f582795d2..e157bce138775 100644
--- a/executor/ddl.go
+++ b/executor/ddl.go
@@ -105,6 +105,8 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.RecordBatch) (err error)
 		err = e.executeAlterTable(x)
 	case *ast.RenameTableStmt:
 		err = e.executeRenameTable(x)
+	case *ast.RecoverTableStmt:
+		err = e.executeRecoverTable(x)
 	}
 	if err != nil {
 		return e.toErr(err)
@@ -296,46 +298,10 @@ func (e *DDLExec) executeAlterTable(s *ast.AlterTableStmt) error {
 	return err
 }
 
-// RestoreTableExec represents a recover table executor.
-// It is built from "admin restore table by job" statement,
+// executeRecoverTable executes a recover table statement.
+// It is built from the "recover table" statement, and
 // is used to recover the table that deleted by mistake.
-type RestoreTableExec struct {
-	baseExecutor
-	jobID  int64
-	Table  *ast.TableName
-	JobNum int64
-}
-
-// Open implements the Executor Open interface.
-func (e *RestoreTableExec) Open(ctx context.Context) error {
-	return e.baseExecutor.Open(ctx)
-}
-
-// Next implements the Executor Open interface.
-func (e *RestoreTableExec) Next(ctx context.Context, req *chunk.RecordBatch) (err error) {
-	// Should commit the previous transaction and create a new transaction.
-	if err = e.ctx.NewTxn(ctx); err != nil {
-		return err
-	}
-	defer func() { e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = false }()
-
-	err = e.executeRestoreTable()
-	if err != nil {
-		return err
-	}
-
-	dom := domain.GetDomain(e.ctx)
-	// Update InfoSchema in TxnCtx, so it will pass schema check.
-	is := dom.InfoSchema()
-	txnCtx := e.ctx.GetSessionVars().TxnCtx
-	txnCtx.InfoSchema = is
-	txnCtx.SchemaVersion = is.SchemaMetaVersion()
-	// DDL will force commit old transaction, after DDL, in transaction status should be false.
-	e.ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusInTrans, false)
-	return nil
-}
-
-func (e *RestoreTableExec) executeRestoreTable() error {
+func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error {
- e.ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusInTrans, false) - return nil -} - -func (e *RestoreTableExec) executeRestoreTable() error { +func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error { txn, err := e.ctx.Txn(true) if err != nil { return err @@ -344,10 +310,10 @@ func (e *RestoreTableExec) executeRestoreTable() error { dom := domain.GetDomain(e.ctx) var job *model.Job var tblInfo *model.TableInfo - if e.jobID != 0 { - job, tblInfo, err = getRestoreTableByJobID(e, t, dom) + if s.JobID != 0 { + job, tblInfo, err = e.getRecoverTableByJobID(s, t, dom) } else { - job, tblInfo, err = getRestoreTableByTableName(e, t, dom) + job, tblInfo, err = e.getRecoverTableByTableName(s, t, dom) } if err != nil { return err @@ -361,18 +327,18 @@ func (e *RestoreTableExec) executeRestoreTable() error { if err != nil { return errors.Errorf("recover table_id: %d, get original autoID from snapshot meta err: %s", job.TableID, err.Error()) } - // Call DDL RestoreTable - err = domain.GetDomain(e.ctx).DDL().RestoreTable(e.ctx, tblInfo, job.SchemaID, autoID, job.ID, job.StartTS) + // Call DDL RecoverTable + err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, tblInfo, job.SchemaID, autoID, job.ID, job.StartTS) return err } -func getRestoreTableByJobID(e *RestoreTableExec, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { - job, err := t.GetHistoryDDLJob(e.jobID) +func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { + job, err := t.GetHistoryDDLJob(s.JobID) if err != nil { return nil, nil, err } if job == nil { - return nil, nil, admin.ErrDDLJobNotFound.GenWithStackByArgs(e.jobID) + return nil, nil, admin.ErrDDLJobNotFound.GenWithStackByArgs(s.JobID) } if job.Type != model.ActionDropTable { return nil, nil, errors.Errorf("Job %v type is %v, not drop table", job.ID, job.Type) @@ -400,7 +366,7 @@ func getRestoreTableByJobID(e *RestoreTableExec, t *meta.Meta, dom *domain.Domai return job, table.Meta(), nil } -func getRestoreTableByTableName(e *RestoreTableExec, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { +func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { jobs, err := t.GetAllHistoryDDLJobs() if err != nil { return nil, nil, err @@ -411,7 +377,7 @@ func getRestoreTableByTableName(e *RestoreTableExec, t *meta.Meta, dom *domain.D if err != nil { return nil, nil, err } - schemaName := e.Table.Schema.L + schemaName := s.Table.Schema.L if schemaName == "" { schemaName = e.ctx.GetSessionVars().CurrentDB } @@ -442,7 +408,7 @@ func getRestoreTableByTableName(e *RestoreTableExec, t *meta.Meta, dom *domain.D fmt.Sprintf("(Table ID %d)", job.TableID), ) } - if table.Meta().Name.L == e.Table.Name.L { + if table.Meta().Name.L == s.Table.Name.L { schema, ok := dom.InfoSchema().SchemaByID(job.SchemaID) if !ok { return nil, nil, infoschema.ErrDatabaseNotExists.GenWithStackByArgs( @@ -456,7 +422,7 @@ func getRestoreTableByTableName(e *RestoreTableExec, t *meta.Meta, dom *domain.D } } if tblInfo == nil { - return nil, nil, errors.Errorf("Can't found drop table: %v in ddl history jobs", e.Table.Name) + return nil, nil, errors.Errorf("Can't found drop table: %v in ddl history jobs", s.Table.Name) } return job, tblInfo, nil } diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 612f9edaf1b28..bff4f3eae7396 100644 --- a/executor/executor_pkg_test.go 
+++ b/executor/executor_pkg_test.go
@@ -50,6 +50,15 @@ func (msm *mockSessionManager) ShowProcessList() map[uint64]util.ProcessInfo {
 	return ret
 }
 
+func (msm *mockSessionManager) GetProcessInfo(id uint64) (util.ProcessInfo, bool) {
+	for _, item := range msm.PS {
+		if item.ID == id {
+			return item, true
+		}
+	}
+	return util.ProcessInfo{}, false
+}
+
 // Kill implements the SessionManager.Kill interface.
 func (msm *mockSessionManager) Kill(cid uint64, query bool) {
 
diff --git a/executor/executor_test.go b/executor/executor_test.go
index b6b95217a235e..0c289add4101e 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -3433,9 +3433,9 @@ func (s *testSuite3) TestSelectPartition(c *C) {
 	tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
 
 	// test select unknown partition error
-	_, err := tk.Exec("select b from th partition (p0,p4)")
+	err := tk.ExecToErr("select b from th partition (p0,p4)")
 	c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
-	_, err = tk.Exec("select b from tr partition (r1,r4)")
+	err = tk.ExecToErr("select b from tr partition (r1,r4)")
 	c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
 }
 
@@ -3452,11 +3452,11 @@ func (s *testSuite) TestSelectView(c *C) {
 	tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
 	tk.MustExec("drop table view_t;")
 	tk.MustExec("create table view_t(c int,d int)")
-	_, err := tk.Exec("select * from view1")
+	err := tk.ExecToErr("select * from view1")
 	c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view1").Error())
-	_, err = tk.Exec("select * from view2")
+	err = tk.ExecToErr("select * from view2")
 	c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view2").Error())
-	_, err = tk.Exec("select * from view3")
+	err = tk.ExecToErr("select * from view3")
 	c.Assert(err.Error(), Equals, "[planner:1054]Unknown column 'a' in 'field list'")
 	tk.MustExec("drop table view_t;")
 	tk.MustExec("create table view_t(a int,b int,c int)")
diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go
new file mode 100644
index 0000000000000..04a64ce3ffc2a
--- /dev/null
+++ b/executor/explainfor_test.go
@@ -0,0 +1,81 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package executor_test
+
+import (
+	"fmt"
+
+	. "github.com/pingcap/check"
+	"github.com/pingcap/parser/auth"
+	"github.com/pingcap/tidb/planner/core"
+	"github.com/pingcap/tidb/util"
+	"github.com/pingcap/tidb/util/testkit"
+)
+
+// mockSessionManager1 is a mocked session manager which is used for test.
+type mockSessionManager1 struct {
+	PS []util.ProcessInfo
+}
+
+// ShowProcessList implements the SessionManager.ShowProcessList interface.
+func (msm *mockSessionManager1) ShowProcessList() map[uint64]util.ProcessInfo { + ret := make(map[uint64]util.ProcessInfo) + for _, item := range msm.PS { + ret[item.ID] = item + } + return ret +} + +func (msm *mockSessionManager1) GetProcessInfo(id uint64) (util.ProcessInfo, bool) { + for _, item := range msm.PS { + if item.ID == id { + return item, true + } + } + return util.ProcessInfo{}, false +} + +// Kill implements the SessionManager.Kill interface. +func (msm *mockSessionManager1) Kill(cid uint64, query bool) { + +} + +func (s *testSuite) TestExplainFor(c *C) { + tkRoot := testkit.NewTestKitWithInit(c, s.store) + tkUser := testkit.NewTestKitWithInit(c, s.store) + tkRoot.MustExec("create table t1(c1 int, c2 int)") + tkRoot.MustExec("create table t2(c1 int, c2 int)") + tkRoot.MustExec("create user tu@'%'") + tkRoot.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", AuthHostname: "%"}, nil, []byte("012345678901234567890")) + tkUser.Se.Auth(&auth.UserIdentity{Username: "tu", Hostname: "localhost", CurrentUser: true, AuthUsername: "tu", AuthHostname: "%"}, nil, []byte("012345678901234567890")) + + tkRoot.MustQuery("select * from t1;") + tkRootProcess := tkRoot.Se.ShowProcess() + ps := []util.ProcessInfo{tkRootProcess} + tkRoot.Se.SetSessionManager(&mockSessionManager1{PS: ps}) + tkUser.Se.SetSessionManager(&mockSessionManager1{PS: ps}) + tkRoot.MustQuery(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)).Check(testkit.Rows( + "TableReader_5 10000.00 root data:TableScan_4", + "└─TableScan_4 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + )) + err := tkUser.ExecToErr(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)) + c.Check(core.ErrAccessDenied.Equal(err), IsTrue) + err = tkUser.ExecToErr("explain for connection 42") + c.Check(core.ErrNoSuchThread.Equal(err), IsTrue) + + tkRootProcess.Plan = nil + ps = []util.ProcessInfo{tkRootProcess} + tkRoot.Se.SetSessionManager(&mockSessionManager1{PS: ps}) + tkRoot.MustExec(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)) +} diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go index 053a1db23ac0f..5ae4f7479996d 100644 --- a/executor/seqtest/seq_executor_test.go +++ b/executor/seqtest/seq_executor_test.go @@ -344,7 +344,7 @@ func (s *seqTestSuite) TestShow(c *C) { tk.MustExec(`drop table if exists show_test_comment`) tk.MustExec(`create table show_test_comment (id int not null default 0 comment "show_test_comment_id")`) tk.MustQuery(`show full columns from show_test_comment`).Check(testutil.RowsWithSep("|", - "id|int(11)|binary|NO||0||select,insert,update,references|show_test_comment_id", + "id|int(11)||NO||0||select,insert,update,references|show_test_comment_id", )) // Test show create table with AUTO_INCREMENT option diff --git a/executor/set_test.go b/executor/set_test.go index 6fdb59fef08ab..d7e28bdd271fe 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -602,8 +602,8 @@ func (s *testSuite2) TestSelectGlobalVar(c *C) { tk.MustExec("set @@global.max_connections=151;") // test for unknown variable. 
- _, err := tk.Exec("select @@invalid") + err := tk.ExecToErr("select @@invalid") c.Assert(terror.ErrorEqual(err, variable.UnknownSystemVar), IsTrue, Commentf("err %v", err)) - _, err = tk.Exec("select @@global.invalid") + err = tk.ExecToErr("select @@global.invalid") c.Assert(terror.ErrorEqual(err, variable.UnknownSystemVar), IsTrue, Commentf("err %v", err)) } diff --git a/executor/show.go b/executor/show.go index a0ae0ce9484bc..c4322a876eb1e 100644 --- a/executor/show.go +++ b/executor/show.go @@ -370,13 +370,6 @@ func (e *ShowExec) fetchShowColumns() error { columnDefault = defaultValStr } } - // issue #9807 - // Some types in show full columns should print other collations. - switch col.Tp { - case mysql.TypeTimestamp, mysql.TypeDate, mysql.TypeDuration, mysql.TypeDatetime, - mysql.TypeYear, mysql.TypeNewDate: - desc.Collation = "NULL" - } // The FULL keyword causes the output to include the column collation and comments, // as well as the privileges you have for each column. diff --git a/executor/show_test.go b/executor/show_test.go index 147b376b18d15..395a5e12817c1 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -170,18 +170,66 @@ func (s *testSuite2) TestShow2(c *C) { // TODO: In MySQL, the result is "autocommit ON". tk2.MustQuery("show global variables where variable_name = 'autocommit'").Check(testkit.Rows("autocommit 1")) + // TODO: Specifying the charset for national char/varchar should not be supported. tk.MustExec("drop table if exists test_full_column") - tk.MustExec(`create table test_full_column( a date , b datetime , c year(4), d timestamp,e time ,f year, h datetime(2) );`) + tk.MustExec(`create table test_full_column( + c_int int, + c_float float, + c_bit bit, + c_bool bool, + c_char char(1) charset ascii collate ascii_bin, + c_nchar national char(1) charset ascii collate ascii_bin, + c_binary binary, + c_varchar varchar(1) charset ascii collate ascii_bin, + c_nvarchar national varchar(1) charset ascii collate ascii_bin, + c_varbinary varbinary(1), + c_year year, + c_date date, + c_time time, + c_datetime datetime, + c_timestamp timestamp, + c_blob blob, + c_tinyblob tinyblob, + c_mediumblob mediumblob, + c_longblob longblob, + c_text text charset ascii collate ascii_bin, + c_tinytext tinytext charset ascii collate ascii_bin, + c_mediumtext mediumtext charset ascii collate ascii_bin, + c_longtext longtext charset ascii collate ascii_bin, + c_json json, + c_enum enum('1') charset ascii collate ascii_bin, + c_set set('1') charset ascii collate ascii_bin + );`) tk.MustQuery(`show full columns from test_full_column`).Check(testkit.Rows( "" + - "a date NULL YES select,insert,update,references ]\n" + - "[b datetime NULL YES select,insert,update,references ]\n" + - "[c year(4) NULL YES select,insert,update,references ]\n" + - "[d timestamp NULL YES select,insert,update,references ]\n" + - "[e time NULL YES select,insert,update,references ]\n" + - "[f year(4) NULL YES select,insert,update,references ]\n" + - "[h datetime(2) NULL YES select,insert,update,references ")) + "c_int int(11) YES select,insert,update,references ]\n" + + "[c_float float YES select,insert,update,references ]\n" + + "[c_bit bit(1) YES select,insert,update,references ]\n" + + "[c_bool tinyint(1) YES select,insert,update,references ]\n" + + "[c_char char(1) ascii_bin YES select,insert,update,references ]\n" + + "[c_nchar char(1) ascii_bin YES select,insert,update,references ]\n" + + "[c_binary binary(1) YES select,insert,update,references ]\n" + + "[c_varchar varchar(1) ascii_bin YES 
select,insert,update,references ]\n" + + "[c_nvarchar varchar(1) ascii_bin YES select,insert,update,references ]\n" + + "[c_varbinary varbinary(1) YES select,insert,update,references ]\n" + + "[c_year year(4) YES select,insert,update,references ]\n" + + "[c_date date YES select,insert,update,references ]\n" + + "[c_time time YES select,insert,update,references ]\n" + + "[c_datetime datetime YES select,insert,update,references ]\n" + + "[c_timestamp timestamp YES select,insert,update,references ]\n" + + "[c_blob blob YES select,insert,update,references ]\n" + + "[c_tinyblob tinyblob YES select,insert,update,references ]\n" + + "[c_mediumblob mediumblob YES select,insert,update,references ]\n" + + "[c_longblob longblob YES select,insert,update,references ]\n" + + "[c_text text ascii_bin YES select,insert,update,references ]\n" + + "[c_tinytext tinytext ascii_bin YES select,insert,update,references ]\n" + + "[c_mediumtext mediumtext ascii_bin YES select,insert,update,references ]\n" + + "[c_longtext longtext ascii_bin YES select,insert,update,references ]\n" + + "[c_json json YES select,insert,update,references ]\n" + + "[c_enum enum('1') ascii_bin YES select,insert,update,references ]\n" + + "[c_set set('1') ascii_bin YES select,insert,update,references ")) + tk.MustExec("drop table if exists test_full_column") tk.MustExec("drop table if exists t") diff --git a/expression/builtin_time.go b/expression/builtin_time.go index da1601859ec56..44842541a41ff 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -4682,6 +4682,10 @@ func (b *builtinMakeTimeSig) evalDuration(row chunk.Row) (types.Duration, bool, var overflow bool // MySQL TIME datatype: https://dev.mysql.com/doc/refman/5.7/en/time.html // ranges from '-838:59:59.000000' to '838:59:59.000000' + if hour < 0 && mysql.HasUnsignedFlag(b.args[0].GetType().Flag) { + hour = 838 + overflow = true + } if hour < -838 { hour = -838 overflow = true diff --git a/expression/builtin_time_test.go b/expression/builtin_time_test.go index 72f1b3b16e5f8..dda5dbfc9d100 100644 --- a/expression/builtin_time_test.go +++ b/expression/builtin_time_test.go @@ -1939,6 +1939,23 @@ func (s *testEvaluatorSuite) TestMakeTime(c *C) { } } + // MAKETIME(CAST(-1 AS UNSIGNED),0,0); + tp1 := &types.FieldType{ + Tp: mysql.TypeLonglong, + Flag: mysql.UnsignedFlag, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + Flen: mysql.MaxIntWidth, + } + f := BuildCastFunction(s.ctx, &Constant{Value: types.NewDatum("-1"), RetType: types.NewFieldType(mysql.TypeString)}, tp1) + res, err := f.Eval(chunk.Row{}) + c.Assert(err, IsNil) + f1, err := maketime.getFunction(s.ctx, s.datumsToConstants([]types.Datum{res, makeDatums(0)[0], makeDatums(0)[0]})) + c.Assert(err, IsNil) + got, err := evalBuiltinFunc(f1, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(got.GetMysqlDuration().String(), Equals, "838:59:59") + tbl = []struct { Args []interface{} Want interface{} diff --git a/expression/helper.go b/expression/helper.go index 6b815f7b5a512..cd150fcb6cc84 100644 --- a/expression/helper.go +++ b/expression/helper.go @@ -49,15 +49,15 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int) (d ty Fsp: fsp, } - defaultTime, err := getSystemTimestamp(ctx) - if err != nil { - return d, err - } sc := ctx.GetSessionVars().StmtCtx switch x := v.(type) { case string: upperX := strings.ToUpper(x) if upperX == strings.ToUpper(ast.CurrentTimestamp) { + defaultTime, err := getSystemTimestamp(ctx) + if err != nil { + return d, err + } value.Time = 
types.FromGoTime(defaultTime.Truncate(time.Duration(math.Pow10(9-fsp)) * time.Nanosecond)) if tp == mysql.TypeTimestamp || tp == mysql.TypeDatetime { err = value.ConvertTimeZone(time.Local, ctx.GetSessionVars().Location()) diff --git a/go.mod b/go.mod index 0e842c9ea0399..ca535624af6d6 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e github.com/pingcap/kvproto v0.0.0-20190215154024-7f2fc73ef562 github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596 - github.com/pingcap/parser v0.0.0-20190404052804-c29af83ede25 + github.com/pingcap/parser v0.0.0-20190409044748-a0b301443a30 github.com/pingcap/pd v2.1.0-rc.4+incompatible github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible github.com/pingcap/tipb v0.0.0-20190107072121-abbec73437b7 diff --git a/go.sum b/go.sum index b5cf095c86cf2..6cf70f55d8c18 100644 --- a/go.sum +++ b/go.sum @@ -119,8 +119,8 @@ github.com/pingcap/kvproto v0.0.0-20190215154024-7f2fc73ef562 h1:32oF1/8lVnBR2JV github.com/pingcap/kvproto v0.0.0-20190215154024-7f2fc73ef562/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596 h1:t2OQTpPJnrPDGlvA+3FwJptMTt6MEPdzK1Wt99oaefQ= github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/parser v0.0.0-20190404052804-c29af83ede25 h1:K7DB0kOkSETe3/4rpbzF/Iv4IgfkGBNu5EfaXxaiBuc= -github.com/pingcap/parser v0.0.0-20190404052804-c29af83ede25/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20190409044748-a0b301443a30 h1:Cu+VJBHLUqI0TFj/0Kya4L1iHIJZ3VbtZcEwv+3zOxQ= +github.com/pingcap/parser v0.0.0-20190409044748-a0b301443a30/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v2.1.0-rc.4+incompatible h1:/buwGk04aHO5odk/+O8ZOXGs4qkUjYTJ2UpCJXna8NE= github.com/pingcap/pd v2.1.0-rc.4+incompatible/go.mod h1:nD3+EoYes4+aNNODO99ES59V83MZSI+dFbhyr667a0E= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= diff --git a/infoschema/builder.go b/infoschema/builder.go index 42046820fa321..65eba8279c1b0 100644 --- a/infoschema/builder.go +++ b/infoschema/builder.go @@ -53,7 +53,7 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro var oldTableID, newTableID int64 tblIDs := make([]int64, 0, 2) switch diff.Type { - case model.ActionCreateTable, model.ActionRestoreTable: + case model.ActionCreateTable, model.ActionRecoverTable: newTableID = diff.TableID tblIDs = append(tblIDs, newTableID) case model.ActionDropTable, model.ActionDropView: diff --git a/infoschema/tables.go b/infoschema/tables.go index 25b87de5512a5..04d7b1845d42f 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -1155,8 +1155,8 @@ func dataForColumnsInTable(schema *model.DBInfo, tbl *model.TableInfo) [][]types numericPrecision, // NUMERIC_PRECISION numericScale, // NUMERIC_SCALE datetimePrecision, // DATETIME_PRECISION - col.Charset, // CHARACTER_SET_NAME - col.Collate, // COLLATION_NAME + columnDesc.Charset, // CHARACTER_SET_NAME + columnDesc.Collation, // COLLATION_NAME columnType, // COLUMN_TYPE columnDesc.Key, // COLUMN_KEY columnDesc.Extra, // EXTRA @@ -1164,12 +1164,6 @@ func dataForColumnsInTable(schema *model.DBInfo, tbl *model.TableInfo) [][]types columnDesc.Comment, // COLUMN_COMMENT col.GeneratedExprString, // GENERATION_EXPRESSION ) - // In mysql, 
'character_set_name' and 'collation_name' are setted to null when column type is non-varchar or non-blob in information_schema. - if col.Tp != mysql.TypeVarchar && col.Tp != mysql.TypeBlob { - record[13].SetNull() - record[14].SetNull() - } - rows = append(rows, record) } return rows diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 3faed0f2fc609..5a805d3b1aa8f 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -126,6 +126,7 @@ func (s *testTableSuite) TestDataForTableStatsField(c *C) { do := s.dom h := do.StatsHandle() + h.Clear() is := do.InfoSchema() tk := testkit.NewTestKit(c, s.store) @@ -168,6 +169,74 @@ func (s *testTableSuite) TestCharacterSetCollations(c *C) { // but the id's are used by client libraries and must be stable tk.MustQuery("SELECT character_set_name, id, sortlen FROM information_schema.collations ORDER BY collation_name").Check( testkit.Rows("armscii8 64 1", "armscii8 32 1", "ascii 65 1", "ascii 11 1", "big5 84 1", "big5 1 1", "binary 63 1", "cp1250 66 1", "cp1250 44 1", "cp1250 34 1", "cp1250 26 1", "cp1250 99 1", "cp1251 50 1", "cp1251 14 1", "cp1251 51 1", "cp1251 52 1", "cp1251 23 1", "cp1256 67 1", "cp1256 57 1", "cp1257 58 1", "cp1257 59 1", "cp1257 29 1", "cp850 80 1", "cp850 4 1", "cp852 81 1", "cp852 40 1", "cp866 68 1", "cp866 36 1", "cp932 96 1", "cp932 95 1", "dec8 69 1", "dec8 3 1", "eucjpms 98 1", "eucjpms 97 1", "euckr 85 1", "euckr 19 1", "gb2312 86 1", "gb2312 24 1", "gbk 87 1", "gbk 28 1", "geostd8 93 1", "geostd8 92 1", "greek 70 1", "greek 25 1", "hebrew 71 1", "hebrew 16 1", "hp8 72 1", "hp8 6 1", "keybcs2 73 1", "keybcs2 37 1", "koi8r 74 1", "koi8r 7 1", "koi8u 75 1", "koi8u 22 1", "latin1 47 1", "latin1 15 1", "latin1 48 1", "latin1 49 1", "latin1 5 1", "latin1 31 1", "latin1 94 1", "latin1 8 1", "latin2 77 1", "latin2 27 1", "latin2 2 1", "latin2 9 1", "latin2 21 1", "latin5 78 1", "latin5 30 1", "latin7 79 1", "latin7 20 1", "latin7 41 1", "latin7 42 1", "macce 43 1", "macce 38 1", "macroman 53 1", "macroman 39 1", "sjis 88 1", "sjis 13 1", "swe7 82 1", "swe7 10 1", "tis620 89 1", "tis620 18 1", "ucs2 90 1", "ucs2 149 1", "ucs2 138 1", "ucs2 139 1", "ucs2 145 1", "ucs2 134 1", "ucs2 35 1", "ucs2 159 1", "ucs2 148 1", "ucs2 146 1", "ucs2 129 1", "ucs2 130 1", "ucs2 140 1", "ucs2 144 1", "ucs2 133 1", "ucs2 143 1", "ucs2 131 1", "ucs2 147 1", "ucs2 141 1", "ucs2 132 1", "ucs2 142 1", "ucs2 135 1", "ucs2 136 1", "ucs2 137 1", "ucs2 150 1", "ucs2 128 1", "ucs2 151 1", "ujis 91 1", "ujis 12 1", "utf16 55 1", "utf16 122 1", "utf16 111 1", "utf16 112 1", "utf16 118 1", "utf16 107 1", "utf16 54 1", "utf16 121 1", "utf16 119 1", "utf16 102 1", "utf16 103 1", "utf16 113 1", "utf16 117 1", "utf16 106 1", "utf16 116 1", "utf16 104 1", "utf16 120 1", "utf16 114 1", "utf16 105 1", "utf16 115 1", "utf16 108 1", "utf16 109 1", "utf16 110 1", "utf16 123 1", "utf16 101 1", "utf16 124 1", "utf16le 62 1", "utf16le 56 1", "utf32 61 1", "utf32 181 1", "utf32 170 1", "utf32 171 1", "utf32 177 1", "utf32 166 1", "utf32 60 1", "utf32 180 1", "utf32 178 1", "utf32 161 1", "utf32 162 1", "utf32 172 1", "utf32 176 1", "utf32 165 1", "utf32 175 1", "utf32 163 1", "utf32 179 1", "utf32 173 1", "utf32 164 1", "utf32 174 1", "utf32 167 1", "utf32 168 1", "utf32 169 1", "utf32 182 1", "utf32 160 1", "utf32 183 1", "utf8 83 1", "utf8 213 1", "utf8 202 1", "utf8 203 1", "utf8 209 1", "utf8 198 1", "utf8 33 1", "utf8 223 1", "utf8 212 1", "utf8 210 1", "utf8 193 1", "utf8 194 1", "utf8 204 1", "utf8 208 1", "utf8 197 1", "utf8 207 
1", "utf8 195 1", "utf8 211 1", "utf8 205 1", "utf8 196 1", "utf8 206 1", "utf8 199 1", "utf8 200 1", "utf8 201 1", "utf8 214 1", "utf8 192 1", "utf8 215 1", "utf8mb4 46 1", "utf8mb4 245 1", "utf8mb4 234 1", "utf8mb4 235 1", "utf8mb4 241 1", "utf8mb4 230 1", "utf8mb4 45 1", "utf8mb4 244 1", "utf8mb4 242 1", "utf8mb4 225 1", "utf8mb4 226 1", "utf8mb4 236 1", "utf8mb4 240 1", "utf8mb4 229 1", "utf8mb4 239 1", "utf8mb4 227 1", "utf8mb4 243 1", "utf8mb4 237 1", "utf8mb4 228 1", "utf8mb4 238 1", "utf8mb4 231 1", "utf8mb4 232 1", "utf8mb4 233 1", "utf8mb4 246 1", "utf8mb4 224 1", "utf8mb4 247 1")) + + // Test charset/collation in information_schema.COLUMNS table. + tk.MustExec("DROP DATABASE IF EXISTS charset_collate_test") + tk.MustExec("CREATE DATABASE charset_collate_test; USE charset_collate_test") + + // TODO: Specifying the charset for national char/varchar should not be supported. + tk.MustExec(`CREATE TABLE charset_collate_col_test( + c_int int, + c_float float, + c_bit bit, + c_bool bool, + c_char char(1) charset ascii collate ascii_bin, + c_nchar national char(1) charset ascii collate ascii_bin, + c_binary binary, + c_varchar varchar(1) charset ascii collate ascii_bin, + c_nvarchar national varchar(1) charset ascii collate ascii_bin, + c_varbinary varbinary(1), + c_year year, + c_date date, + c_time time, + c_datetime datetime, + c_timestamp timestamp, + c_blob blob, + c_tinyblob tinyblob, + c_mediumblob mediumblob, + c_longblob longblob, + c_text text charset ascii collate ascii_bin, + c_tinytext tinytext charset ascii collate ascii_bin, + c_mediumtext mediumtext charset ascii collate ascii_bin, + c_longtext longtext charset ascii collate ascii_bin, + c_json json, + c_enum enum('1') charset ascii collate ascii_bin, + c_set set('1') charset ascii collate ascii_bin + )`) + + tk.MustQuery(`SELECT column_name, character_set_name, collation_name + FROM information_schema.COLUMNS + WHERE table_schema = "charset_collate_test" AND table_name = "charset_collate_col_test" + ORDER BY column_name`, + ).Check(testkit.Rows( + "c_binary ", + "c_bit ", + "c_blob ", + "c_bool ", + "c_char ascii ascii_bin", + "c_date ", + "c_datetime ", + "c_enum ascii ascii_bin", + "c_float ", + "c_int ", + "c_json ", + "c_longblob ", + "c_longtext ascii ascii_bin", + "c_mediumblob ", + "c_mediumtext ascii ascii_bin", + "c_nchar ascii ascii_bin", + "c_nvarchar ascii ascii_bin", + "c_set ascii ascii_bin", + "c_text ascii ascii_bin", + "c_time ", + "c_timestamp ", + "c_tinyblob ", + "c_tinytext ascii ascii_bin", + "c_varbinary ", + "c_varchar ascii ascii_bin", + "c_year ", + )) + tk.MustExec("DROP DATABASE charset_collate_test") } type mockSessionManager struct { @@ -176,6 +245,11 @@ type mockSessionManager struct { func (sm *mockSessionManager) ShowProcessList() map[uint64]util.ProcessInfo { return sm.processInfoMap } +func (sm *mockSessionManager) GetProcessInfo(id uint64) (util.ProcessInfo, bool) { + rs, ok := sm.processInfoMap[id] + return rs, ok +} + func (sm *mockSessionManager) Kill(connectionID uint64, query bool) {} func (s *testTableSuite) TestSomeTables(c *C) { diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index bdabe02558e4e..8f52cb1e4adbb 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -290,59 +290,6 @@ func (alloc *allocator) Alloc(tableID int64) (int64, error) { return alloc.alloc4Signed(tableID) } -var ( - memID int64 - memIDLock sync.Mutex -) - -type memoryAllocator struct { - mu sync.Mutex - base int64 - end int64 - dbID int64 -} - -// Base implements autoid.Allocator 
Base interface. -func (alloc *memoryAllocator) Base() int64 { - return alloc.base -} - -// End implements autoid.Allocator End interface. -func (alloc *memoryAllocator) End() int64 { - return alloc.end -} - -// NextGlobalAutoID implements autoid.Allocator NextGlobalAutoID interface. -func (alloc *memoryAllocator) NextGlobalAutoID(tableID int64) (int64, error) { - memIDLock.Lock() - defer memIDLock.Unlock() - return memID + 1, nil -} - -// Rebase implements autoid.Allocator Rebase interface. -func (alloc *memoryAllocator) Rebase(tableID, newBase int64, allocIDs bool) error { - // TODO: implement it. - return nil -} - -// Alloc implements autoid.Allocator Alloc interface. -func (alloc *memoryAllocator) Alloc(tableID int64) (int64, error) { - if tableID == 0 { - return 0, errInvalidTableID.GenWithStack("Invalid tableID") - } - alloc.mu.Lock() - defer alloc.mu.Unlock() - if alloc.base == alloc.end { // step - memIDLock.Lock() - memID = memID + step - alloc.end = memID - alloc.base = alloc.end - step - memIDLock.Unlock() - } - alloc.base++ - return alloc.base, nil -} - // NewAllocator returns a new auto increment id generator on the store. func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator { return &allocator{ @@ -352,13 +299,6 @@ func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator { } } -// NewMemoryAllocator returns a new auto increment id generator in memory. -func NewMemoryAllocator(dbID int64) Allocator { - return &memoryAllocator{ - dbID: dbID, - } -} - //autoid error codes. const codeInvalidTableID terror.ErrCode = 1 diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index 909a338eaa0fb..5173349e42b8c 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -17,6 +17,8 @@ import ( "testing" . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/parser/terror" ) func TestT(t *testing.T) { @@ -32,3 +34,18 @@ func (s *testSuite) TestMetrics(c *C) { // Make sure it doesn't panic. PanicCounter.WithLabelValues(LabelDomain).Inc() } + +func (s *testSuite) TestRegisterMetrics(c *C) { + // Make sure it doesn't panic. 
+ RegisterMetrics() +} + +func (s *testSuite) TestRetLabel(c *C) { + c.Assert(RetLabel(nil), Equals, opSucc) + c.Assert(RetLabel(errors.New("test error")), Equals, opFailed) +} + +func (s *testSuite) TestExecuteErrorToLabel(c *C) { + c.Assert(ExecuteErrorToLabel(errors.New("test")), Equals, `unknown`) + c.Assert(ExecuteErrorToLabel(terror.ErrResultUndetermined), Equals, `global:2`) +} diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index a29fb980a07d7..caefa22d9f1eb 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -924,3 +924,65 @@ func (s *testAnalyzeSuite) TestIssue9562(c *C) { " └─TableScan_13 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", )) } + +func (s *testAnalyzeSuite) TestIssue9805(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec(` + create table t1 ( + id bigint primary key, + a bigint not null, + b varchar(100) not null, + c varchar(10) not null, + d bigint as (a % 30) not null, + key (d, b, c) + ) + `) + tk.MustExec(` + create table t2 ( + id varchar(50) primary key, + a varchar(100) unique, + b datetime, + c varchar(45), + d int not null unique auto_increment + ) + `) + rs := tk.MustQuery("explain analyze select t1.id, t2.a from t1 join t2 on t1.a = t2.d where t1.b = 't2' and t1.d = 4") + + // Expected output is like: + // + // +--------------------------------+----------+------+----------------------------------------------------------------------------------+----------------------------------+ + // | id | count | task | operator info | execution info | + // +--------------------------------+----------+------+----------------------------------------------------------------------------------+----------------------------------+ + // | Projection_9 | 10.00 | root | test.t1.id, test.t2.a | time:203.355µs, loops:1, rows:0 | + // | └─IndexJoin_13 | 10.00 | root | inner join, inner:IndexLookUp_12, outer key:test.t1.a, inner key:test.t2.d | time:199.633µs, loops:1, rows:0 | + // | ├─Projection_16 | 8.00 | root | test.t1.id, test.t1.a, test.t1.b, cast(mod(test.t1.a, 30)) | time:164.587µs, loops:1, rows:0 | + // | │ └─Selection_17 | 8.00 | root | eq(cast(mod(test.t1.a, 30)), 4) | time:157.768µs, loops:1, rows:0 | + // | │ └─TableReader_20 | 10.00 | root | data:Selection_19 | time:154.61µs, loops:1, rows:0 | + // | │ └─Selection_19 | 10.00 | cop | eq(test.t1.b, "t2") | time:28.824µs, loops:1, rows:0 | + // | │ └─TableScan_18 | 10000.00 | cop | table:t1, range:[-inf,+inf], keep order:false, stats:pseudo | time:27.654µs, loops:1, rows:0 | + // | └─IndexLookUp_12 | 10.00 | root | | time:0ns, loops:0, rows:0 | + // | ├─IndexScan_10 | 10.00 | cop | table:t2, index:d, range: decided by [test.t1.a], keep order:false, stats:pseudo | time:0ns, loops:0, rows:0 | + // | └─TableScan_11 | 10.00 | cop | table:t2, keep order:false, stats:pseudo | time:0ns, loops:0, rows:0 | + // +--------------------------------+----------+------+----------------------------------------------------------------------------------+----------------------------------+ + // 10 rows in set (0.00 sec) + // + c.Assert(rs.Rows(), HasLen, 10) + hasIndexLookUp12 := false + for _, row := range rs.Rows() { + c.Assert(row, HasLen, 5) + if strings.HasSuffix(row[0].(string), "IndexLookUp_12") { + hasIndexLookUp12 = true + c.Assert(row[4], 
Equals, "time:0ns, loops:0, rows:0") + } + } + c.Assert(hasIndexLookUp12, IsTrue) +} diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index b761b7ca6cada..9a6234d673ec6 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/types/parser_driver" + driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/ranger" @@ -84,14 +84,6 @@ type RecoverIndex struct { IndexName string } -// RestoreTable is used for recover deleted files by mistake. -type RestoreTable struct { - baseSchemaProducer - JobID int64 - Table *ast.TableName - JobNum int64 -} - // CleanupIndex is used to delete dangling index data. type CleanupIndex struct { baseSchemaProducer @@ -492,6 +484,9 @@ func (e *Explain) prepareSchema() error { // RenderResult renders the explain result as specified format. func (e *Explain) RenderResult() error { + if e.StmtPlan == nil { + return nil + } switch strings.ToLower(e.Format) { case ast.ExplainFormatROW: e.explainedPlans = map[int]bool{} @@ -543,6 +538,8 @@ func (e *Explain) prepareOperatorInfo(p PhysicalPlan, taskType string, indent st row = append(row, runtimeStatsColl.GetCopStats(p.ExplainID()).String()) } else if runtimeStatsColl.ExistsRootStats(p.ExplainID()) { row = append(row, runtimeStatsColl.GetRootStats(p.ExplainID()).String()) + } else { + row = append(row, "time:0ns, loops:0, rows:0") } } e.Rows = append(e.Rows, row) diff --git a/planner/core/errors.go b/planner/core/errors.go index d3ff18f10988d..6612bcc0dcd71 100644 --- a/planner/core/errors.go +++ b/planner/core/errors.go @@ -136,6 +136,9 @@ var ( ErrWindowRangeBoundNotConstant = terror.ClassOptimizer.New(codeWindowRangeBoundNotConstant, mysql.MySQLErrName[mysql.ErrWindowRangeBoundNotConstant]) ErrWindowRowsIntervalUse = terror.ClassOptimizer.New(codeWindowRowsIntervalUse, mysql.MySQLErrName[mysql.ErrWindowRowsIntervalUse]) ErrWindowFunctionIgnoresFrame = terror.ClassOptimizer.New(codeWindowFunctionIgnoresFrame, mysql.MySQLErrName[mysql.ErrWindowFunctionIgnoresFrame]) + ErrNoSuchThread = terror.ClassOptimizer.New(mysql.ErrNoSuchThread, mysql.MySQLErrName[mysql.ErrNoSuchThread]) + // Since we cannot know if user loggined with a password, use message of ErrAccessDeniedNoPassword instead + ErrAccessDenied = terror.ClassOptimizer.New(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword]) ) func init() { @@ -187,6 +190,9 @@ func init() { codeWindowRangeBoundNotConstant: mysql.ErrWindowRangeBoundNotConstant, codeWindowRowsIntervalUse: mysql.ErrWindowRowsIntervalUse, codeWindowFunctionIgnoresFrame: mysql.ErrWindowFunctionIgnoresFrame, + + mysql.ErrNoSuchThread: mysql.ErrNoSuchThread, + mysql.ErrAccessDenied: mysql.ErrAccessDenied, } terror.ErrClassToMySQLCodes[terror.ClassOptimizer] = mysqlErrCodeMap } diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 737a2dc67aacb..ef0fb653d0702 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/types/parser_driver" + driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/ranger" ) @@ -238,6 +238,8 @@ func (b *PlanBuilder) Build(node 
ast.Node) (Plan, error) { return b.buildExecute(x) case *ast.ExplainStmt: return b.buildExplain(x) + case *ast.ExplainForStmt: + return b.buildExplainFor(x) case *ast.TraceStmt: return b.buildTrace(x) case *ast.InsertStmt: @@ -625,14 +627,6 @@ func (b *PlanBuilder) buildAdmin(as *ast.AdminStmt) (Plan, error) { p := &ShowSlow{ShowSlow: as.ShowSlow} p.SetSchema(buildShowSlowSchema()) ret = p - case ast.AdminRestoreTable: - if len(as.JobIDs) > 0 { - ret = &RestoreTable{JobID: as.JobIDs[0]} - } else if len(as.Tables) > 0 { - ret = &RestoreTable{Table: as.Tables[0], JobNum: as.JobNumber} - } else { - return nil, ErrUnsupportedType.GenWithStack("Unsupported ast.AdminStmt(%T) for buildAdmin", as) - } default: return nil, ErrUnsupportedType.GenWithStack("Unsupported ast.AdminStmt(%T) for buildAdmin", as) } @@ -1098,8 +1092,7 @@ func (b *PlanBuilder) buildSimple(node ast.StmtNode) (Plan, error) { // Otherwise, you can kill only your own threads and statements. sm := b.ctx.GetSessionManager() if sm != nil { - processList := sm.ShowProcessList() - if pi, ok := processList[raw.ConnectionID]; ok { + if pi, ok := sm.GetProcessInfo(raw.ConnectionID); ok { loginUser := b.ctx.GetSessionVars().User if pi.User != loginUser.Username { b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) @@ -1701,6 +1694,9 @@ func (b *PlanBuilder) buildDDL(node ast.DDLNode) (Plan, error) { } b.visitInfo = appendVisitInfo(b.visitInfo, mysql.InsertPriv, v.NewTable.Schema.L, v.NewTable.Name.L, "", authErr) + case *ast.RecoverTableStmt: + // The recover table command can only be executed by an administrator. + b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) } p := &DDL{Statement: node} return p, nil @@ -1735,15 +1731,7 @@ func (b *PlanBuilder) buildTrace(trace *ast.TraceStmt) (Plan, error) { return p, nil } -func (b *PlanBuilder) buildExplain(explain *ast.ExplainStmt) (Plan, error) { - if show, ok := explain.Stmt.(*ast.ShowStmt); ok { - return b.buildShow(show) - } - targetPlan, err := OptimizeAstNode(b.ctx, explain.Stmt, b.is) - if err != nil { - return nil, err - } - +func (b *PlanBuilder) buildExplainPlan(targetPlan Plan, format string, analyze bool, execStmt ast.StmtNode) (Plan, error) { pp, ok := targetPlan.(PhysicalPlan) if !ok { switch x := targetPlan.(type) { @@ -1760,15 +1748,51 @@ func (b *PlanBuilder) buildExplain(explain *ast.ExplainStmt) (Plan, error) { return nil, ErrUnsupportedType.GenWithStackByArgs(targetPlan) } } - p := &Explain{StmtPlan: pp, Analyze: explain.Analyze, Format: explain.Format, ExecStmt: explain.Stmt, ExecPlan: targetPlan} + + p := &Explain{StmtPlan: pp, Analyze: analyze, Format: format, ExecStmt: execStmt, ExecPlan: targetPlan} p.ctx = b.ctx - err = p.prepareSchema() + err := p.prepareSchema() if err != nil { return nil, err } return p, nil } +// buildExplainFor gets the *last* (possibly still running or already finished) query plan of the connection with the given ID. +// See https://dev.mysql.com/doc/refman/8.0/en/explain-for-connection.html.
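For orientation, an illustrative usage sketch of the statement this builder handles, written in the repo's testkit style; the table, the data, and the literal connection IDs are placeholders invented for this sketch, not part of the patch:

package example

import (
	"fmt"

	"github.com/pingcap/tidb/util/testkit"
)

// explainForConnectionSketch assumes tk1 and tk2 are two separate client
// sessions created by the surrounding test suite.
func explainForConnectionSketch(tk1, tk2 *testkit.TestKit) {
	// Session 1 runs a query; the session layer records the compiled plan
	// as the connection's current plan (see the session.go change below).
	tk1.MustExec("use test")
	tk1.MustQuery("select * from t where a = 1")

	// Session 2 asks for session 1's last plan. 42 stands in for tk1's
	// connection ID, normally taken from SHOW PROCESSLIST. If the two
	// users differ, the SUPER privilege is required.
	tk2.MustQuery("explain for connection 42")

	// An unknown connection ID surfaces ErrNoSuchThread.
	_, err := tk2.Exec("explain for connection 99999")
	fmt.Println(err)
}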
+func (b *PlanBuilder) buildExplainFor(explainFor *ast.ExplainForStmt) (Plan, error) { + processInfo, ok := b.ctx.GetSessionManager().GetProcessInfo(explainFor.ConnectionID) + if !ok { + return nil, ErrNoSuchThread.GenWithStackByArgs(explainFor.ConnectionID) + } + if b.ctx.GetSessionVars() != nil && b.ctx.GetSessionVars().User != nil { + if b.ctx.GetSessionVars().User.Username != processInfo.User { + err := ErrAccessDenied.GenWithStackByArgs(b.ctx.GetSessionVars().User.Username, b.ctx.GetSessionVars().User.Hostname) + // Note: this differs from MySQL's behavior and documentation. + b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", err) + } + } + + targetPlan, ok := processInfo.Plan.(Plan) + if !ok || targetPlan == nil { + return &Explain{Format: explainFor.Format}, nil + } + + return b.buildExplainPlan(targetPlan, explainFor.Format, false, nil) +} + +func (b *PlanBuilder) buildExplain(explain *ast.ExplainStmt) (Plan, error) { + if show, ok := explain.Stmt.(*ast.ShowStmt); ok { + return b.buildShow(show) + } + targetPlan, err := OptimizeAstNode(b.ctx, explain.Stmt, b.is) + if err != nil { + return nil, err + } + + return b.buildExplainPlan(targetPlan, explain.Format, explain.Analyze, explain.Stmt) +} + func buildShowProcedureSchema() *expression.Schema { tblName := "ROUTINES" schema := expression.NewSchema(make([]*expression.Column, 0, 11)...) diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index f69d94b3111e7..3ebd9f3eb9de2 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -111,11 +111,11 @@ func (p *preprocessor) Enter(in ast.Node) (out ast.Node, skipChildren bool) { return in, true case *ast.Join: p.checkNonUniqTableAlias(node) - case *ast.AdminStmt: - // The specified table in admin restore syntax maybe already been dropped. - // So skip check table name here, otherwise, admin restore table [table_name] syntax will return - // table not exists error. But admin restore is use to restore the dropped table. So skip children here. - return in, node.Tp == ast.AdminRestoreTable + case *ast.RecoverTableStmt: + // The table specified in a recover table statement may already have been dropped. + // So skip the table name check here; otherwise, recover table [table_name] would return a + // table-not-exists error. Since the recover table statement is used to recover a dropped table, skip children here. + return in, true default: p.flag &= ^parentIsJoin } diff --git a/server/http_handler.go b/server/http_handler.go index 6378d6f4212fb..db8f2428aefdd 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tidb/sessionctx/binloginfo", "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/table" @@ -110,55 +111,30 @@ func writeData(w http.ResponseWriter, data interface{}) { } type tikvHandlerTool struct { - regionCache *tikv.RegionCache - store kvStore + helper.Helper } // newTikvHandlerTool checks and prepares for tikv handler. // It would panic when any error happens.
func (s *Server) newTikvHandlerTool() *tikvHandlerTool { - var tikvStore kvStore + var tikvStore tikv.Storage store, ok := s.driver.(*TiDBDriver) if !ok { panic("Invalid KvStore with illegal driver") } - if tikvStore, ok = store.store.(kvStore); !ok { + if tikvStore, ok = store.store.(tikv.Storage); !ok { panic("Invalid KvStore with illegal store") } regionCache := tikvStore.GetRegionCache() return &tikvHandlerTool{ - regionCache: regionCache, - store: tikvStore, - } -} - -func (t *tikvHandlerTool) getMvccByEncodedKey(encodedKey kv.Key) (*kvrpcpb.MvccGetByKeyResponse, error) { - keyLocation, err := t.regionCache.LocateKey(tikv.NewBackoffer(context.Background(), 500), encodedKey) - if err != nil { - return nil, errors.Trace(err) - } - - tikvReq := &tikvrpc.Request{ - Type: tikvrpc.CmdMvccGetByKey, - MvccGetByKey: &kvrpcpb.MvccGetByKeyRequest{ - Key: encodedKey, + helper.Helper{ + RegionCache: regionCache, + Store: tikvStore, }, } - kvResp, err := t.store.SendReq(tikv.NewBackoffer(context.Background(), 500), tikvReq, keyLocation.Region, time.Minute) - if err != nil { - logutil.Logger(context.Background()).Info("get MVCC by encoded key failed", - zap.Binary("encodeKey", encodedKey), - zap.Reflect("region", keyLocation.Region), - zap.Binary("startKey", keyLocation.StartKey), - zap.Binary("endKey", keyLocation.EndKey), - zap.Reflect("kvResp", kvResp), - zap.Error(err)) - return nil, errors.Trace(err) - } - return kvResp.MvccGetByKey, nil } type mvccKV struct { @@ -168,14 +144,14 @@ type mvccKV struct { func (t *tikvHandlerTool) getMvccByHandle(tableID, handle int64) (*mvccKV, error) { encodedKey := tablecodec.EncodeRowKeyWithHandle(tableID, handle) - data, err := t.getMvccByEncodedKey(encodedKey) - return &mvccKV{strings.ToUpper(hex.EncodeToString(encodedKey)), data}, err + data, err := t.GetMvccByEncodedKey(encodedKey) + return &mvccKV{Key: strings.ToUpper(hex.EncodeToString(encodedKey)), Value: data}, err } func (t *tikvHandlerTool) getMvccByStartTs(startTS uint64, startKey, endKey []byte) (*kvrpcpb.MvccGetByStartTsResponse, error) { bo := tikv.NewBackoffer(context.Background(), 5000) for { - curRegion, err := t.regionCache.LocateKey(bo, startKey) + curRegion, err := t.RegionCache.LocateKey(bo, startKey) if err != nil { logutil.Logger(context.Background()).Error("get MVCC by startTS failed", zap.Uint64("txnStartTS", startTS), zap.Binary("startKey", startKey), zap.Error(err)) return nil, errors.Trace(err) @@ -188,7 +164,7 @@ func (t *tikvHandlerTool) getMvccByStartTs(startTS uint64, startKey, endKey []by }, } tikvReq.Context.Priority = kvrpcpb.CommandPri_Low - kvResp, err := t.store.SendReq(bo, tikvReq, curRegion.Region, time.Hour) + kvResp, err := t.Store.SendReq(bo, tikvReq, curRegion.Region, time.Hour) if err != nil { logutil.Logger(context.Background()).Error("get MVCC by startTS failed", zap.Uint64("txnStartTS", startTS), @@ -257,7 +233,7 @@ func (t *tikvHandlerTool) getMvccByIdxValue(idx table.Index, values url.Values, if err != nil { return nil, errors.Trace(err) } - data, err := t.getMvccByEncodedKey(encodedKey) + data, err := t.GetMvccByEncodedKey(encodedKey) return &mvccKV{strings.ToUpper(hex.EncodeToString(encodedKey)), data}, err } @@ -310,7 +286,7 @@ func (t *tikvHandlerTool) getTable(dbName, tableName string) (*model.TableInfo, } func (t *tikvHandlerTool) schema() (infoschema.InfoSchema, error) { - session, err := session.CreateSession(t.store.(kv.Storage)) + session, err := session.CreateSession(t.Store.(kv.Storage)) if err != nil { return nil, errors.Trace(err) } @@ -322,36 
+298,11 @@ func (t *tikvHandlerTool) handleMvccGetByHex(params map[string]string) (interfac if err != nil { return nil, errors.Trace(err) } - return t.getMvccByEncodedKey(encodedKey) -} - -func (t *tikvHandlerTool) getAllHistoryDDL() ([]*model.Job, error) { - s, err := session.CreateSession(t.store.(kv.Storage)) - if err != nil { - return nil, errors.Trace(err) - } - - if s != nil { - defer s.Close() - } - - store := domain.GetDomain(s.(sessionctx.Context)).Store() - txn, err := store.Begin() - - if err != nil { - return nil, errors.Trace(err) - } - txnMeta := meta.NewMeta(txn) - - jobs, err := txnMeta.GetAllHistoryDDLJobs() - if err != nil { - return nil, errors.Trace(err) - } - return jobs, nil + return t.GetMvccByEncodedKey(encodedKey) } -func (t *tikvHandlerTool) scrapeHotInfo(rw string) (map[tblIndex]regionMetric, error) { - regionMetrics, err := t.fetchHotRegion(rw) +func (t *tikvHandlerTool) scrapeHotInfo(rw string) (map[tblIndex]helper.RegionMetric, error) { + regionMetrics, err := t.FetchHotRegion(rw) if err != nil { return nil, err } @@ -363,34 +314,6 @@ func (t *tikvHandlerTool) scrapeHotInfo(rw string) (map[tblIndex]regionMetric, e return tblIdx, nil } -// storeHotRegionInfos records all hog region stores. -// it's the response of PD. -type storeHotRegionInfos struct { - AsPeer map[uint64]*hotRegionsStat `json:"as_peer"` - AsLeader map[uint64]*hotRegionsStat `json:"as_leader"` -} - -// hotRegions records echo store's hot region. -// it's the response of PD. -type hotRegionsStat struct { - RegionsStat []regionStat `json:"statistics"` -} - -// regionStat records each hot region's statistics -// it's the response of PD. -type regionStat struct { - RegionID uint64 `json:"region_id"` - FlowBytes uint64 `json:"flow_bytes"` - HotDegree int `json:"hot_degree"` -} - -// regionMetric presents the final metric output entry. 
-type regionMetric struct { - FlowBytes uint64 `json:"flow_bytes"` - MaxHotDegree int `json:"max_hot_degree"` - Count int `json:"region_count"` -} - // tblIndex presents the aggregate key that combined with db,table,index type tblIndex struct { DbName string `json:"db_name"` @@ -398,54 +321,15 @@ type tblIndex struct { IndexName string `json:"index_name"` } -func (t *tikvHandlerTool) fetchHotRegion(rw string) (map[uint64]regionMetric, error) { - etcd, ok := t.store.(domain.EtcdBackend) - if !ok { - return nil, errors.New("not implemented") - } - pdHosts := etcd.EtcdAddrs() - if len(pdHosts) == 0 { - return nil, errors.New("pd unavailable") - } - req, err := http.NewRequest("GET", protocol+pdHosts[0]+rw, nil) - if err != nil { - return nil, errors.Trace(err) - } - timeout, cancelFunc := context.WithTimeout(context.Background(), 50*time.Millisecond) - resp, err := http.DefaultClient.Do(req.WithContext(timeout)) - cancelFunc() - if err != nil { - return nil, errors.Trace(err) - } - defer func() { - err = resp.Body.Close() - if err != nil { - logutil.Logger(context.Background()).Error("close body failed", zap.Error(err)) - } - }() - var regionResp storeHotRegionInfos - err = json.NewDecoder(resp.Body).Decode(&regionResp) - if err != nil { - return nil, errors.Trace(err) - } - metric := make(map[uint64]regionMetric) - for _, hotRegions := range regionResp.AsLeader { - for _, region := range hotRegions.RegionsStat { - metric[region.RegionID] = regionMetric{FlowBytes: region.FlowBytes, MaxHotDegree: region.HotDegree} - } - } - return metric, nil -} - -func (t *tikvHandlerTool) fetchRegionTableIndex(metrics map[uint64]regionMetric) (map[tblIndex]regionMetric, error) { +func (t *tikvHandlerTool) fetchRegionTableIndex(metrics map[uint64]helper.RegionMetric) (map[tblIndex]helper.RegionMetric, error) { schema, err := t.schema() if err != nil { return nil, err } - idxMetrics := make(map[tblIndex]regionMetric) + idxMetrics := make(map[tblIndex]helper.RegionMetric) for regionID, regionMetric := range metrics { - region, err := t.regionCache.LocateRegionByID(tikv.NewBackoffer(context.Background(), 500), regionID) + region, err := t.RegionCache.LocateRegionByID(tikv.NewBackoffer(context.Background(), 500), regionID) if err != nil { logutil.Logger(context.Background()).Error("locate region failed", zap.Error(err)) continue @@ -713,7 +597,7 @@ type RegionFrameRange struct { func (t *tikvHandlerTool) getRegionsMeta(regionIDs []uint64) ([]RegionMeta, error) { regions := make([]RegionMeta, len(regionIDs)) for i, regionID := range regionIDs { - meta, leader, err := t.regionCache.PDClient().GetRegionByID(context.TODO(), regionID) + meta, leader, err := t.RegionCache.PDClient().GetRegionByID(context.TODO(), regionID) if err != nil { return nil, errors.Trace(err) } @@ -925,6 +809,31 @@ func (h ddlHistoryJobHandler) ServeHTTP(w http.ResponseWriter, req *http.Request writeData(w, jobs) } +func (h ddlHistoryJobHandler) getAllHistoryDDL() ([]*model.Job, error) { + s, err := session.CreateSession(h.Store.(kv.Storage)) + if err != nil { + return nil, errors.Trace(err) + } + + if s != nil { + defer s.Close() + } + + store := domain.GetDomain(s.(sessionctx.Context)).Store() + txn, err := store.Begin() + + if err != nil { + return nil, errors.Trace(err) + } + txnMeta := meta.NewMeta(txn) + + jobs, err := txnMeta.GetAllHistoryDDLJobs() + if err != nil { + return nil, errors.Trace(err) + } + return jobs, nil +} + func (h ddlResignOwnerHandler) resignDDLOwner() error { dom, err := session.GetDomain(h.store) if err != nil { @@
-958,7 +867,7 @@ func (h ddlResignOwnerHandler) ServeHTTP(w http.ResponseWriter, req *http.Reques func (h tableHandler) getPDAddr() ([]string, error) { var pdAddrs []string - etcd, ok := h.store.(domain.EtcdBackend) + etcd, ok := h.Store.(domain.EtcdBackend) if !ok { return nil, errors.New("not implemented") } @@ -1069,7 +978,7 @@ func (h tableHandler) handleRegionRequest(schema infoschema.InfoSchema, tbl tabl tableID := tbl.Meta().ID // for record startKey, endKey := tablecodec.GetTableHandleKeyRange(tableID) - recordRegionIDs, err := h.regionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) + recordRegionIDs, err := h.RegionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) if err != nil { writeError(w, err) return @@ -1087,7 +996,7 @@ func (h tableHandler) handleRegionRequest(schema infoschema.InfoSchema, tbl tabl indices[i].Name = index.Meta().Name.String() indices[i].ID = indexID startKey, endKey := tablecodec.GetTableIndexKeyRange(tableID, indexID) - rIDs, err := h.regionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) + rIDs, err := h.RegionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) if err != nil { writeError(w, err) return @@ -1161,7 +1070,7 @@ func (h tableHandler) handleDiskUsageRequest(schema infoschema.InfoSchema, tbl t type hotRegion struct { tblIndex - regionMetric + helper.RegionMetric } type hotRegions []hotRegion @@ -1187,7 +1096,7 @@ func (h regionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { startKey := []byte{'m'} endKey := []byte{'n'} - recordRegionIDs, err := h.regionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) + recordRegionIDs, err := h.RegionCache.ListRegionIDsInKeyRange(tikv.NewBackoffer(context.Background(), 500), startKey, endKey) if err != nil { writeError(w, err) return @@ -1212,7 +1121,7 @@ func (h regionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { writeError(w, err) return } - asSortedEntry := func(metric map[tblIndex]regionMetric) hotRegions { + asSortedEntry := func(metric map[tblIndex]helper.RegionMetric) hotRegions { hs := make(hotRegions, 0, len(metric)) for key, value := range metric { hs = append(hs, hotRegion{key, value}) @@ -1237,7 +1146,7 @@ func (h regionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { regionID := uint64(regionIDInt) // locate region - region, err := h.regionCache.LocateRegionByID(tikv.NewBackoffer(context.Background(), 500), regionID) + region, err := h.RegionCache.LocateRegionByID(tikv.NewBackoffer(context.Background(), 500), regionID) if err != nil { writeError(w, err) return @@ -1607,7 +1516,7 @@ type serverInfo struct { // ServeHTTP handles request of ddl server info. func (h serverInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - do, err := session.GetDomain(h.store.(kv.Storage)) + do, err := session.GetDomain(h.Store.(kv.Storage)) if err != nil { writeError(w, errors.New("create session error")) log.Error(err) @@ -1630,7 +1539,7 @@ type clusterServerInfo struct { // ServeHTTP handles request of all ddl servers info. 
func (h allServerInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - do, err := session.GetDomain(h.store.(kv.Storage)) + do, err := session.GetDomain(h.Store.(kv.Storage)) if err != nil { writeError(w, errors.New("create session error")) log.Error(err) diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 1b90271a0aa75..826ce61481776 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -600,7 +600,7 @@ func (ts *HTTPHandlerTestSuite) TestAllHistory(c *C) { decoder := json.NewDecoder(resp.Body) var jobs []*model.Job - s, _ := session.CreateSession(ts.server.newTikvHandlerTool().store.(kv.Storage)) + s, _ := session.CreateSession(ts.server.newTikvHandlerTool().Store.(kv.Storage)) defer s.Close() store := domain.GetDomain(s.(sessionctx.Context)).Store() txn, _ := store.Begin() @@ -714,7 +714,7 @@ func (ts *HTTPHandlerTestSuite) TestServerInfo(c *C) { c.Assert(info.Version, Equals, mysql.ServerVersion) c.Assert(info.GitHash, Equals, printer.TiDBGitHash) - store := ts.server.newTikvHandlerTool().store.(kv.Storage) + store := ts.server.newTikvHandlerTool().Store.(kv.Storage) do, err := session.GetDomain(store.(kv.Storage)) c.Assert(err, IsNil) ddl := do.DDL() @@ -737,7 +737,7 @@ func (ts *HTTPHandlerTestSuite) TestAllServerInfo(c *C) { c.Assert(clusterInfo.IsAllServerVersionConsistent, IsTrue) c.Assert(clusterInfo.ServersNum, Equals, 1) - store := ts.server.newTikvHandlerTool().store.(kv.Storage) + store := ts.server.newTikvHandlerTool().Store.(kv.Storage) do, err := session.GetDomain(store.(kv.Storage)) c.Assert(err, IsNil) ddl := do.DDL() diff --git a/server/http_status.go b/server/http_status.go index 0a79ba0a95d70..62bd51c548c4f 100644 --- a/server/http_status.go +++ b/server/http_status.go @@ -68,7 +68,7 @@ func (s *Server) startHTTPServer() { router.Handle("/schema/{db}/{table}", schemaHandler{tikvHandlerTool}) router.Handle("/tables/{colID}/{colTp}/{colFlag}/{colLen}", valueHandler{}) router.Handle("/ddl/history", ddlHistoryJobHandler{tikvHandlerTool}).Name("DDL_History") - router.Handle("/ddl/owner/resign", ddlResignOwnerHandler{tikvHandlerTool.store.(kv.Storage)}).Name("DDL_Owner_Resign") + router.Handle("/ddl/owner/resign", ddlResignOwnerHandler{tikvHandlerTool.Store.(kv.Storage)}).Name("DDL_Owner_Resign") // HTTP path for get server info. router.Handle("/info", serverInfoHandler{tikvHandlerTool}).Name("Info") diff --git a/server/server.go b/server/server.go index 7ef69ffacf168..58705781fef14 100644 --- a/server/server.go +++ b/server/server.go @@ -43,6 +43,7 @@ import ( "sync" "sync/atomic" "time" + // For pprof _ "net/http/pprof" @@ -496,6 +497,17 @@ func (s *Server) ShowProcessList() map[uint64]util.ProcessInfo { return rs } +// GetProcessInfo implements the SessionManager interface. +func (s *Server) GetProcessInfo(id uint64) (util.ProcessInfo, bool) { + s.rwlock.RLock() + conn, ok := s.clients[uint32(id)] + s.rwlock.RUnlock() + if !ok || atomic.LoadInt32(&conn.status) == connStatusWaitShutdown { + return util.ProcessInfo{}, false + } + return conn.ctx.ShowProcess(), ok +} + // Kill implements the SessionManager interface. 
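A consumer-side sketch of the GetProcessInfo method added above; checkKillPrivilege is a hypothetical helper written for illustration (it mirrors the planner's KILL rule), not code from this patch:

package example

import "github.com/pingcap/tidb/util"

// checkKillPrivilege reports whether loginUser may kill connection connID,
// using the new single-connection lookup instead of materializing the whole
// process list via ShowProcessList.
func checkKillPrivilege(sm util.SessionManager, loginUser string, connID uint64) bool {
	pi, ok := sm.GetProcessInfo(connID)
	if !ok {
		// Unknown connection, or one already waiting for shutdown.
		return false
	}
	// Your own connections are always killable; anyone else's needs SUPER.
	return pi.User == loginUser
}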
func (s *Server) Kill(connectionID uint64, query bool) { s.rwlock.Lock() diff --git a/session/session.go b/session/session.go index 145b5f521a8c4..ecf1ac65d7c5e 100644 --- a/session/session.go +++ b/session/session.go @@ -141,6 +141,8 @@ type session struct { values map[fmt.Stringer]interface{} } + currentPlan plannercore.Plan + store kv.Storage parser *parser.Parser @@ -881,6 +883,7 @@ func (s *session) SetProcessInfo(sql string, t time.Time, command byte) { ID: s.sessionVars.ConnectionID, DB: s.sessionVars.CurrentDB, Command: command, + Plan: s.currentPlan, Time: t, State: s.Status(), Info: sql, @@ -972,6 +975,7 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec return nil, err } metrics.SessionExecuteCompileDuration.WithLabelValues(label).Observe(time.Since(startTS).Seconds()) + s.currentPlan = stmt.Plan // Step3: Execute the physical plan. if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets); err != nil { @@ -1672,14 +1676,14 @@ func logStmt(node ast.StmtNode, vars *variable.SessionVars) { user := vars.User schemaVersion := vars.TxnCtx.SchemaVersion if ss, ok := node.(ast.SensitiveStmtNode); ok { - logutil.Logger(context.Background()).Info("[CRUCIAL OPERATION]", - zap.Uint64("con", vars.ConnectionID), + logutil.Logger(context.Background()).Info("CRUCIAL OPERATION", + zap.Uint64("conn", vars.ConnectionID), zap.Int64("schemaVersion", schemaVersion), zap.String("secure text", ss.SecureText()), zap.Stringer("user", user)) } else { - logutil.Logger(context.Background()).Info("[CRUCIAL OPERATION]", - zap.Uint64("con", vars.ConnectionID), + logutil.Logger(context.Background()).Info("CRUCIAL OPERATION", + zap.Uint64("conn", vars.ConnectionID), zap.Int64("schemaVersion", schemaVersion), zap.String("cur_db", vars.CurrentDB), zap.String("sql", stmt.Text()), @@ -1693,8 +1697,8 @@ func logStmt(node ast.StmtNode, vars *variable.SessionVars) { func logQuery(query string, vars *variable.SessionVars) { if atomic.LoadUint32(&variable.ProcessGeneralLog) != 0 && !vars.InRestrictedSQL { query = executor.QueryReplacer.Replace(query) - logutil.Logger(context.Background()).Info("[GENERAL_LOG]", - zap.Uint64("con", vars.ConnectionID), + logutil.Logger(context.Background()).Info("GENERAL_LOG", + zap.Uint64("conn", vars.ConnectionID), zap.Stringer("user", vars.User), zap.Int64("schemaVersion", vars.TxnCtx.SchemaVersion), zap.Uint64("txnStartTS", vars.TxnCtx.StartTS), diff --git a/store/helper/helper.go b/store/helper/helper.go new file mode 100644 index 0000000000000..ee1ff6284eff9 --- /dev/null +++ b/store/helper/helper.go @@ -0,0 +1,135 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package helper + +import ( + "context" + "encoding/json" + "net/http" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + protocol = "http://" +) + +// Helper is a middleware to get some information from tikv/pd. It can be used by TiDB's HTTP API or memory tables. +type Helper struct { + Store tikv.Storage + RegionCache *tikv.RegionCache +} + +// GetMvccByEncodedKey gets the MVCC value for the given encoded key. +func (h *Helper) GetMvccByEncodedKey(encodedKey kv.Key) (*kvrpcpb.MvccGetByKeyResponse, error) { + keyLocation, err := h.RegionCache.LocateKey(tikv.NewBackoffer(context.Background(), 500), encodedKey) + if err != nil { + return nil, errors.Trace(err) + } + + tikvReq := &tikvrpc.Request{ + Type: tikvrpc.CmdMvccGetByKey, + MvccGetByKey: &kvrpcpb.MvccGetByKeyRequest{ + Key: encodedKey, + }, + } + kvResp, err := h.Store.SendReq(tikv.NewBackoffer(context.Background(), 500), tikvReq, keyLocation.Region, time.Minute) + if err != nil { + logutil.Logger(context.Background()).Info("get MVCC by encoded key failed", + zap.Binary("encodeKey", encodedKey), + zap.Reflect("region", keyLocation.Region), + zap.Binary("startKey", keyLocation.StartKey), + zap.Binary("endKey", keyLocation.EndKey), + zap.Reflect("kvResp", kvResp), + zap.Error(err)) + return nil, errors.Trace(err) + } + return kvResp.MvccGetByKey, nil +} + +// StoreHotRegionInfos records all hot region stores. +// It's the response of PD. +type StoreHotRegionInfos struct { + AsPeer map[uint64]*hotRegionsStat `json:"as_peer"` + AsLeader map[uint64]*hotRegionsStat `json:"as_leader"` +} + +// hotRegionsStat records each store's hot regions. +// It's the response of PD. +type hotRegionsStat struct { + RegionsStat []regionStat `json:"statistics"` +} + +// regionStat records each hot region's statistics. +// It's the response of PD. +type regionStat struct { + RegionID uint64 `json:"region_id"` + FlowBytes uint64 `json:"flow_bytes"` + HotDegree int `json:"hot_degree"` +} + +// RegionMetric presents the final metric output entry. +type RegionMetric struct { + FlowBytes uint64 `json:"flow_bytes"` + MaxHotDegree int `json:"max_hot_degree"` + Count int `json:"region_count"` +} + +// FetchHotRegion fetches hot region information from PD's HTTP API.
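Before the function itself, an illustrative call site: printHotRegions is a hypothetical consumer (any component holding a tikv.Storage can now reuse the helper instead of the HTTP handler's private copy), and the PD path matches the one exercised in helper_test.go below:

package example

import (
	"fmt"

	"github.com/pingcap/tidb/store/helper"
	"github.com/pingcap/tidb/store/tikv"
)

func printHotRegions(store tikv.Storage) error {
	h := helper.Helper{Store: store, RegionCache: store.GetRegionCache()}
	metrics, err := h.FetchHotRegion("/pd/api/v1/hotspot/regions/read")
	if err != nil {
		return err
	}
	for regionID, m := range metrics {
		fmt.Printf("region %d: flow=%d bytes, max hot degree=%d\n", regionID, m.FlowBytes, m.MaxHotDegree)
	}
	return nil
}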
+func (h *Helper) FetchHotRegion(rw string) (map[uint64]RegionMetric, error) { + etcd, ok := h.Store.(domain.EtcdBackend) + if !ok { + return nil, errors.WithStack(errors.New("not implemented")) + } + pdHosts := etcd.EtcdAddrs() + if len(pdHosts) == 0 { + return nil, errors.New("pd unavailable") + } + req, err := http.NewRequest("GET", protocol+pdHosts[0]+rw, nil) + if err != nil { + return nil, errors.Trace(err) + } + timeout, cancelFunc := context.WithTimeout(context.Background(), 50*time.Millisecond) + resp, err := http.DefaultClient.Do(req.WithContext(timeout)) + cancelFunc() + if err != nil { + return nil, errors.Trace(err) + } + defer func() { + err = resp.Body.Close() + if err != nil { + logutil.Logger(context.Background()).Error("close body failed", zap.Error(err)) + } + }() + var regionResp StoreHotRegionInfos + err = json.NewDecoder(resp.Body).Decode(&regionResp) + if err != nil { + return nil, errors.Trace(err) + } + metric := make(map[uint64]RegionMetric) + for _, hotRegions := range regionResp.AsLeader { + for _, region := range hotRegions.RegionsStat { + metric[region.RegionID] = RegionMetric{FlowBytes: region.FlowBytes, MaxHotDegree: region.HotDegree} + } + } + return metric, nil +} diff --git a/store/helper/helper_test.go b/store/helper/helper_test.go new file mode 100644 index 0000000000000..697a46c5b5b81 --- /dev/null +++ b/store/helper/helper_test.go @@ -0,0 +1,118 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "net/http" + "testing" + "time" + + "github.com/gorilla/mux" + .
"github.com/pingcap/check" + "github.com/pingcap/log" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv" + "go.uber.org/zap" +) + +type HelperTestSuite struct { + store tikv.Storage +} + +var _ = Suite(new(HelperTestSuite)) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +type mockStore struct { + tikv.Storage + pdAddrs []string +} + +func (s *mockStore) EtcdAddrs() []string { + return s.pdAddrs +} + +func (s *mockStore) StartGCWorker() error { + panic("not implemented") +} + +func (s *mockStore) TLSConfig() *tls.Config { + panic("not implemented") +} + +func (s *HelperTestSuite) SetUpSuite(c *C) { + go s.mockPDHTTPServer(c) + time.Sleep(100 * time.Millisecond) + mvccStore := mocktikv.MustNewMVCCStore() + mockTikvStore, err := mockstore.NewMockTikvStore(mockstore.WithMVCCStore(mvccStore)) + s.store = &mockStore{ + mockTikvStore.(tikv.Storage), + []string{"127.0.0.1:10090/"}, + } + c.Assert(err, IsNil) +} + +func (s *HelperTestSuite) TestHotRegion(c *C) { + helper := Helper{ + Store: s.store, + RegionCache: s.store.GetRegionCache(), + } + regionMetric, err := helper.FetchHotRegion("/pd/api/v1/hotspot/regions/read") + c.Assert(err, IsNil, Commentf("err: %+v", err)) + c.Assert(fmt.Sprintf("%v", regionMetric), Equals, "map[1:{100 1 0}]") +} + +func (s *HelperTestSuite) mockPDHTTPServer(c *C) { + router := mux.NewRouter() + router.HandleFunc("/pd/api/v1/hotspot/regions/read", s.mockHotRegionResponse) + serverMux := http.NewServeMux() + serverMux.Handle("/", router) + server := &http.Server{Addr: "127.0.0.1:10090", Handler: serverMux} + err := server.ListenAndServe() + c.Assert(err, IsNil) +} + +func (s *HelperTestSuite) mockHotRegionResponse(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + regionsStat := hotRegionsStat{ + []regionStat{ + { + FlowBytes: 100, + RegionID: 1, + HotDegree: 1, + }, + }, + } + resp := StoreHotRegionInfos{ + AsLeader: make(map[uint64]*hotRegionsStat), + } + resp.AsLeader[0] = ®ionsStat + data, err := json.MarshalIndent(resp, "", " ") + if err != nil { + log.Panic("json marshal failed", zap.Error(err)) + } + _, err = w.Write(data) + if err != nil { + log.Panic("write http response failed", zap.Error(err)) + } + +} diff --git a/table/column.go b/table/column.go index a63cbcdaaa4d1..f4fc28d360b78 100644 --- a/table/column.go +++ b/table/column.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/parser/charset" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + field_types "github.com/pingcap/parser/types" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/sessionctx" @@ -205,9 +206,12 @@ func CastValue(ctx sessionctx.Context, val types.Datum, col *model.ColumnInfo) ( // ColDesc describes column information like MySQL desc and show columns do. type ColDesc struct { - Field string - Type string - Collation string + Field string + Type string + // Charset is nil if the column doesn't have a charset, or a string indicating the charset name. + Charset interface{} + // Collation is nil if the column doesn't have a collation, or a string indicating the collation name. 
+ Collation interface{} Null string Key string DefaultValue interface{} @@ -267,9 +271,10 @@ func NewColDesc(col *Column) *ColDesc { } } - return &ColDesc{ + desc := &ColDesc{ Field: name.O, Type: col.GetTypeDesc(), + Charset: col.Charset, Collation: col.Collate, Null: nullFlag, Key: keyFlag, @@ -278,6 +283,11 @@ func NewColDesc(col *Column) *ColDesc { Privileges: defaultPrivileges, Comment: col.Comment, } + if !field_types.HasCharset(&col.ColumnInfo.FieldType) { + desc.Charset = nil + desc.Collation = nil + } + return desc } // ColDescFieldNames returns the fields name in result set for desc and show columns. diff --git a/table/tables/tables.go b/table/tables/tables.go index 3afdc2b0247ae..dec149c6a3bcc 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -925,7 +925,8 @@ func (t *tableCommon) AllocAutoID(ctx sessionctx.Context) (int64, error) { return 0, err } if t.meta.ShardRowIDBits > 0 { - if t.overflowShardBits(rowID) { + // Use max record ShardRowIDBits to check overflow. + if OverflowShardBits(rowID, t.meta.MaxShardRowIDBits) { // If overflow, the rowID may be duplicated. For examples, // t.meta.ShardRowIDBits = 4 // rowID = 0010111111111111111111111111111111111111111111111111111111111111 @@ -945,9 +946,9 @@ func (t *tableCommon) AllocAutoID(ctx sessionctx.Context) (int64, error) { return rowID, nil } -// overflowShardBits check whether the rowID overflow `1<<(64-t.meta.ShardRowIDBits-1) -1`. -func (t *tableCommon) overflowShardBits(rowID int64) bool { - mask := (1<<t.meta.ShardRowIDBits - 1) << (64 - t.meta.ShardRowIDBits - 1) - return rowID&int64(mask) > 0 -} +// OverflowShardBits checks whether the rowID overflows `1<<(64-shardRowIDBits-1) -1`. +func OverflowShardBits(rowID int64, shardRowIDBits uint64) bool { + mask := (1<<shardRowIDBits - 1) << (64 - shardRowIDBits - 1) + return rowID&int64(mask) > 0 +} diff --git a/types/binary_literal_test.go b/types/binary_literal_test.go index a8788ecd422bc..b1547fec5f24f 100644 --- a/types/binary_literal_test.go +++ b/types/binary_literal_test.go @@ -106,6 +106,9 @@ func (s *testBinaryLiteralSuite) TestParseBitStr(c *C) { c.Assert([]byte(b), DeepEquals, t.Expected, Commentf("%#v", t)) } } + b, err := ParseBitStr("") + c.Assert(b, IsNil) + c.Assert(err, ErrorMatches, "invalid empty .*") } func (s *testBinaryLiteralSuite) TestParseHexStr(c *C) { @@ -139,6 +142,9 @@ func (s *testBinaryLiteralSuite) TestParseHexStr(c *C) { c.Assert([]byte(hex), DeepEquals, t.Expected, Commentf("%#v", t)) } } + hex, err := ParseHexStr("") + c.Assert(hex, IsNil) + c.Assert(err, ErrorMatches, "invalid empty .*") } func (s *testBinaryLiteralSuite) TestString(c *C) { @@ -243,6 +249,12 @@ func (s *testBinaryLiteralSuite) TestNewBinaryLiteralFromUint(c *C) { hex := NewBinaryLiteralFromUint(t.Input, t.ByteSize) c.Assert([]byte(hex), DeepEquals, t.Expected, Commentf("%#v", t)) } + + defer func() { + r := recover() + c.Assert(r, NotNil) + }() + NewBinaryLiteralFromUint(0x123, -2) } func (s *testBinaryLiteralSuite) TestCompare(c *C) { diff --git a/types/convert_test.go b/types/convert_test.go index 6398b9b87eeac..262b76862cb9c 100644 --- a/types/convert_test.go +++ b/types/convert_test.go @@ -884,3 +884,22 @@ func (s *testTypeConvertSuite) TestNumberToDuration(c *C) { c.Assert(dur.Duration, Equals, tc.dur) } } + +func (s *testTypeConvertSuite) TestStrToDuration(c *C) { + sc := new(stmtctx.StatementContext) + var tests = []struct { + str string + fsp int + isDuration bool + }{ + {"20190412120000", 4, false}, + {"20190101180000", 6, false}, + {"20190101180000", 1, false}, + {"20190101181234", 3, false}, + } + for _, tt := range tests { + _, _, isDuration, err := StrToDuration(sc, tt.str, tt.fsp) + c.Assert(err, IsNil) + c.Assert(isDuration, Equals, tt.isDuration) + } +} diff --git a/types/datum_test.go b/types/datum_test.go index 3e67866bfe1a5..a5a145ba55e4e 100644 --- a/types/datum_test.go +++
b/types/datum_test.go @@ -40,9 +40,13 @@ func (ts *testDatumSuite) TestDatum(c *C) { } for _, val := range values { var d Datum + d.SetMinNotNull() d.SetValue(val) x := d.GetValue() c.Assert(x, DeepEquals, val) + d.SetCollation(d.Collation()) + c.Assert(d.Collation(), NotNil) + c.Assert(d.Length(), Equals, int(d.length)) } } @@ -194,6 +198,34 @@ func (ts *testTypeConvertSuite) TestToFloat32(c *C) { c.Assert(converted.GetFloat64(), Equals, datum.GetFloat64()) } +func (ts *testTypeConvertSuite) TestToFloat64(c *C) { + testCases := []struct { + d Datum + errMsg string + result float64 + }{ + {NewDatum(float32(3.00)), "", 3.00}, + {NewDatum(float64(12345.678)), "", 12345.678}, + {NewDatum("12345.678"), "", 12345.678}, + {NewDatum([]byte("12345.678")), "", 12345.678}, + {NewDatum(int64(12345)), "", 12345}, + {NewDatum(uint64(123456)), "", 123456}, + {NewDatum(byte(123)), "cannot convert .*", 0}, + } + + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + for _, t := range testCases { + converted, err := t.d.ToFloat64(sc) + if t.errMsg == "" { + c.Assert(err, IsNil) + } else { + c.Assert(err, ErrorMatches, t.errMsg) + } + c.Assert(converted, Equals, t.result) + } +} + // mustParseTimeIntoDatum is similar to ParseTime but panic if any error occurs. func mustParseTimeIntoDatum(s string, tp byte, fsp int) (d Datum) { t, err := ParseTime(&stmtctx.StatementContext{TimeZone: time.UTC}, s, tp, fsp) diff --git a/types/helper_test.go b/types/helper_test.go index fa39980252901..434a88c7d0e2f 100644 --- a/types/helper_test.go +++ b/types/helper_test.go @@ -26,6 +26,7 @@ type testTypeHelperSuite struct { } func (s *testTypeHelperSuite) TestStrToInt(c *C) { + c.Parallel() tests := []struct { input string output string @@ -44,3 +45,21 @@ func (s *testTypeHelperSuite) TestStrToInt(c *C) { c.Check(strconv.FormatInt(output, 10), Equals, tt.output) } } + +func (s *testTypeHelperSuite) TestTruncate(c *C) { + c.Parallel() + tests := []struct { + f float64 + dec int + expected float64 + }{ + {123.45, 0, 123}, + {123.45, 1, 123.4}, + {123.45, 2, 123.45}, + {123.45, 3, 123.450}, + } + for _, tt := range tests { + res := Truncate(tt.f, tt.dec) + c.Assert(res, Equals, tt.expected) + } +} diff --git a/types/json/binary_functions_test.go b/types/json/binary_functions_test.go new file mode 100644 index 0000000000000..1d63e880e8aae --- /dev/null +++ b/types/json/binary_functions_test.go @@ -0,0 +1,31 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. +package json + +import ( + . "github.com/pingcap/check" +) + +var _ = Suite(&testJSONFuncSuite{}) + +type testJSONFuncSuite struct{} + +func (s *testJSONFuncSuite) TestdecodeEscapedUnicode(c *C) { + c.Parallel() + in := "597d" + r, size, err := decodeEscapedUnicode([]byte(in)) + c.Assert(string(r[:]), Equals, "好\x00") + c.Assert(size, Equals, 3) + c.Assert(err, IsNil) + +} diff --git a/types/mytime_test.go b/types/mytime_test.go index 8d9d8d016a68f..f2897c878908b 100644 --- a/types/mytime_test.go +++ b/types/mytime_test.go @@ -14,6 +14,8 @@ package types import ( + "time" + . 
"github.com/pingcap/check" ) @@ -228,3 +230,44 @@ func (s *testMyTimeSuite) TestGetLastDay(c *C) { c.Assert(day, Equals, t.expectedDay) } } + +func (s *testMyTimeSuite) TestgetFixDays(c *C) { + tests := []struct { + year int + month int + day int + ot time.Time + expectedDay int + }{ + {2000, 1, 0, time.Date(2000, 1, 31, 0, 0, 0, 0, time.UTC), -2}, + {2000, 1, 12, time.Date(2000, 1, 31, 0, 0, 0, 0, time.UTC), 0}, + {2000, 1, 12, time.Date(2000, 1, 0, 0, 0, 0, 0, time.UTC), 0}, + {2000, 2, 24, time.Date(2000, 2, 10, 0, 0, 0, 0, time.UTC), 0}, + {2019, 04, 05, time.Date(2019, 04, 01, 1, 2, 3, 4, time.UTC), 0}, + } + + for _, t := range tests { + res := getFixDays(t.year, t.month, t.day, t.ot) + c.Assert(res, Equals, t.expectedDay) + } +} + +func (s *testMyTimeSuite) TestAddDate(c *C) { + tests := []struct { + year int + month int + day int + ot time.Time + }{ + {01, 1, 0, time.Date(2000, 1, 01, 0, 0, 0, 0, time.UTC)}, + {02, 1, 12, time.Date(2000, 1, 01, 0, 0, 0, 0, time.UTC)}, + {03, 1, 12, time.Date(2000, 1, 01, 0, 0, 0, 0, time.UTC)}, + {04, 2, 24, time.Date(2000, 2, 10, 0, 0, 0, 0, time.UTC)}, + {01, 04, 05, time.Date(2019, 04, 01, 1, 2, 3, 4, time.UTC)}, + } + + for _, t := range tests { + res := AddDate(int64(t.year), int64(t.month), int64(t.day), t.ot) + c.Assert(res.Year(), Equals, t.year+t.ot.Year()) + } +} diff --git a/types/overflow_test.go b/types/overflow_test.go index 0f2fb0fc435c0..43d6d84a88c9a 100644 --- a/types/overflow_test.go +++ b/types/overflow_test.go @@ -305,6 +305,7 @@ func (s *testOverflowSuite) TestDiv(c *C) { {1, -1, 0, true}, {math.MaxInt64, math.MinInt64, 0, false}, {math.MaxInt64, -1, 0, true}, + {100, 20, 5, false}, } for _, t := range tblInt { diff --git a/types/time_test.go b/types/time_test.go index e7cf071ea23ee..d5a88c7c25c59 100644 --- a/types/time_test.go +++ b/types/time_test.go @@ -758,6 +758,21 @@ func (s *testTimeSuite) TestRoundFrac(c *C) { c.Assert(err, IsNil) c.Assert(nv.String(), Equals, t.Except) } + + cols := []struct { + input time.Time + fsp int + output time.Time + }{ + {time.Date(2011, 11, 11, 10, 10, 10, 888888, time.UTC), 0, time.Date(2011, 11, 11, 10, 10, 10, 11, time.UTC)}, + {time.Date(2011, 11, 11, 10, 10, 10, 111111, time.UTC), 0, time.Date(2011, 11, 11, 10, 10, 10, 10, time.UTC)}, + } + + for _, col := range cols { + res, err := types.RoundFrac(col.input, col.fsp) + c.Assert(res.Second(), Equals, col.output.Second()) + c.Assert(err, IsNil) + } } func (s *testTimeSuite) TestConvert(c *C) { @@ -832,6 +847,12 @@ func (s *testTimeSuite) TestCompare(c *C) { c.Assert(ret, Equals, t.Ret) } + v1, err := types.ParseTime(nil, "2011-10-10 11:11:11", mysql.TypeDatetime, types.MaxFsp) + c.Assert(err, IsNil) + res, err := v1.CompareString(nil, "Test should error") + c.Assert(err, NotNil) + c.Assert(res, Equals, 0) + tbl = []struct { Arg1 string Arg2 string @@ -1337,3 +1358,74 @@ func (s *testTimeSuite) TestgetFracIndex(c *C) { c.Assert(index, Equals, testCase.expectIndex) } } + +func (s *testTimeSuite) TestTimeOverflow(c *C) { + sc := mock.NewContext().GetSessionVars().StmtCtx + sc.IgnoreZeroInDate = true + defer testleak.AfterTest(c)() + table := []struct { + Input string + Output bool + }{ + {"2012-12-31 11:30:45", false}, + {"12-12-31 11:30:45", false}, + {"2012-12-31", false}, + {"20121231", false}, + {"2012-02-29", false}, + {"2018-01-01 18", false}, + {"18-01-01 18", false}, + {"2018.01.01", false}, + {"2018.01.01 00:00:00", false}, + {"2018/01/01-00:00:00", false}, + } + + for _, test := range table { + t, err := types.ParseDatetime(sc, 
test.Input) + c.Assert(err, IsNil) + isOverflow, err := types.DateTimeIsOverflow(sc, t) + c.Assert(err, IsNil) + c.Assert(isOverflow, Equals, test.Output) + } +} + +func (s *testTimeSuite) TestTruncateFrac(c *C) { + cols := []struct { + input time.Time + fsp int + output time.Time + }{ + {time.Date(2011, 11, 11, 10, 10, 10, 888888, time.UTC), 0, time.Date(2011, 11, 11, 10, 10, 10, 11, time.UTC)}, + {time.Date(2011, 11, 11, 10, 10, 10, 111111, time.UTC), 0, time.Date(2011, 11, 11, 10, 10, 10, 10, time.UTC)}, + } + + for _, col := range cols { + res, err := types.TruncateFrac(col.input, col.fsp) + c.Assert(res.Second(), Equals, col.output.Second()) + c.Assert(err, IsNil) + } +} +func (s *testTimeSuite) TestTimeSub(c *C) { + tbl := []struct { + Arg1 string + Arg2 string + Ret string + }{ + {"2017-01-18 01:01:01", "2017-01-18 00:00:01", "01:01:00"}, + {"2017-01-18 01:01:01", "2017-01-18 01:01:01", "00:00:00"}, + {"2019-04-12 18:20:00", "2019-04-12 14:00:00", "04:20:00"}, + } + + sc := &stmtctx.StatementContext{ + TimeZone: time.UTC, + } + for _, t := range tbl { + v1, err := types.ParseTime(nil, t.Arg1, mysql.TypeDatetime, types.MaxFsp) + c.Assert(err, IsNil) + v2, err := types.ParseTime(nil, t.Arg2, mysql.TypeDatetime, types.MaxFsp) + c.Assert(err, IsNil) + dur, err := types.ParseDuration(sc, t.Ret, types.MaxFsp) + c.Assert(err, IsNil) + rec := v1.Sub(sc, &v2) + c.Assert(rec, Equals, dur) + } +} diff --git a/util/processinfo.go b/util/processinfo.go index fa32a405df84c..d6a992da0e318 100644 --- a/util/processinfo.go +++ b/util/processinfo.go @@ -27,6 +27,7 @@ type ProcessInfo struct { Host string DB string Command byte + Plan interface{} Time time.Time State uint16 Info string @@ -58,5 +59,6 @@ func (pi *ProcessInfo) ToRow(full bool) []interface{} { type SessionManager interface { // ShowProcessList returns map[connectionID]ProcessInfo ShowProcessList() map[uint64]ProcessInfo + GetProcessInfo(id uint64) (ProcessInfo, bool) Kill(connectionID uint64, query bool) }
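Finally, to make the shard-bits arithmetic in table/tables/tables.go concrete, a self-contained sketch (a standalone reimplementation for illustration; the numbers mirror TestAlterShardRowIDBits):

package main

import "fmt"

// overflowShardBits mirrors OverflowShardBits: shardRowIDBits bits are
// reserved directly below the sign bit, so the mask covers bits
// (63-shardRowIDBits)..62, and any auto ID reaching those bits would
// collide with a shard prefix.
func overflowShardBits(rowID int64, shardRowIDBits uint64) bool {
	mask := (uint64(1)<<shardRowIDBits - 1) << (64 - shardRowIDBits - 1)
	return rowID&int64(mask) > 0
}

func main() {
	// After `alter table t1 auto_increment = 1<<56`, raising
	// shard_row_id_bits to 10 would overflow (the mask covers bits 53..62),
	// while keeping 5 bits (mask over bits 58..62) still leaves room.
	fmt.Println(overflowShardBits(1<<56, 10)) // true
	fmt.Println(overflowShardBits(1<<56, 5))  // false
}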