diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index d8e96e056d206..16650d6dbe9d6 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -1202,14 +1202,26 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e
 
 	err = d.doDDLJob(ctx, job)
 	if err == nil {
+		var preSplitAndScatter func()
 		// do pre-split and scatter.
 		if tbInfo.ShardRowIDBits > 0 && tbInfo.PreSplitRegions > 0 {
+			preSplitAndScatter = func() { preSplitTableRegion(d.store, tbInfo, ctx.GetSessionVars().WaitTableSplitFinish) }
+		} else if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
+			pi := tbInfo.GetPartitionInfo()
+			if pi != nil {
+				preSplitAndScatter = func() { splitPartitionTableRegion(d.store, pi) }
+			} else {
+				preSplitAndScatter = func() { splitTableRegion(d.store, tbInfo.ID) }
+			}
+		}
+		if preSplitAndScatter != nil {
 			if ctx.GetSessionVars().WaitTableSplitFinish {
-				preSplitTableRegion(d.store, tbInfo, true)
+				preSplitAndScatter()
 			} else {
-				go preSplitTableRegion(d.store, tbInfo, false)
+				go preSplitAndScatter()
 			}
 		}
+
 		if tbInfo.AutoIncID > 1 {
 			// Default tableAutoIncID base is 0.
 			// If the first ID is expected to greater than 1, we need to do rebase.
diff --git a/ddl/table.go b/ddl/table.go
index cec8c7ca07a8e..dd604a3d8c7c3 100644
--- a/ddl/table.go
+++ b/ddl/table.go
@@ -17,7 +17,6 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
-	"sync/atomic"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
@@ -74,10 +73,6 @@ func onCreateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error)
 	if err != nil {
 		return ver, errors.Trace(err)
 	}
-	if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
-		// TODO: Add restrictions to this operation.
-		go splitTableRegion(d.store, tbInfo.ID)
-	}
 	// Finish this job.
 	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo)
 	asyncNotifyEvent(d, &util.Event{Tp: model.ActionCreateTable, TableInfo: tbInfo})
@@ -340,6 +335,13 @@ type splitableStore interface {
 	WaitScatterRegionFinish(regionID uint64) error
 }
 
+func splitPartitionTableRegion(store kv.Storage, pi *model.PartitionInfo) {
+	// Max partition count is 4096, should we sample and just choose some of the partition to split?
+	for _, def := range pi.Definitions {
+		splitTableRegion(store, def.ID)
+	}
+}
+
 func splitTableRegion(store kv.Storage, tableID int64) {
 	s, ok := store.(splitableStore)
 	if !ok {
diff --git a/ddl/table_split_test.go b/ddl/table_split_test.go
index ca6aa9b97d236..132255a986dc6 100644
--- a/ddl/table_split_test.go
+++ b/ddl/table_split_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/tablecodec"
+	"github.com/pingcap/tidb/util/testkit"
 )
 
 type testDDLTableSplitSuite struct{}
@@ -41,20 +42,39 @@ func (s *testDDLTableSplitSuite) TestTableSplit(c *C) {
 	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
 	dom, err := session.BootstrapSession(store)
 	c.Assert(err, IsNil)
+	tk := testkit.NewTestKit(c, store)
+	tk.MustExec("use test")
+	tk.MustExec(`create table t_part (a int key) partition by range(a) (
+		partition p0 values less than (10),
+		partition p1 values less than (20)
+	)`)
 	defer dom.Close()
 	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
 	infoSchema := dom.InfoSchema()
 	c.Assert(infoSchema, NotNil)
 	t, err := infoSchema.TableByName(model.NewCIStr("mysql"), model.NewCIStr("tidb"))
 	c.Assert(err, IsNil)
-	regionStartKey := tablecodec.EncodeTablePrefix(t.Meta().ID)
+	checkRegionStartWithTableID(c, t.Meta().ID, store.(kvStore))
 
-	type kvStore interface {
-		GetRegionCache() *tikv.RegionCache
+	t, err = infoSchema.TableByName(model.NewCIStr("test"), model.NewCIStr("t_part"))
+	c.Assert(err, IsNil)
+	pi := t.Meta().GetPartitionInfo()
+	c.Assert(pi, NotNil)
+	for _, def := range pi.Definitions {
+		checkRegionStartWithTableID(c, def.ID, store.(kvStore))
 	}
+}
+
+type kvStore interface {
+	GetRegionCache() *tikv.RegionCache
+}
+
+func checkRegionStartWithTableID(c *C, id int64, store kvStore) {
+	regionStartKey := tablecodec.EncodeTablePrefix(id)
 	var loc *tikv.KeyLocation
+	var err error
 	for i := 0; i < 10; i++ {
-		cache := store.(kvStore).GetRegionCache()
+		cache := store.GetRegionCache()
 		loc, err = cache.LocateKey(tikv.NewBackoffer(context.Background(), 5000), regionStartKey)
 		c.Assert(err, IsNil)