executor: enhance global index (#41197)
close #40497
L-maple authored Feb 21, 2023
1 parent c1e4702 commit 5cbf6eb
Showing 9 changed files with 226 additions and 68 deletions.
14 changes: 13 additions & 1 deletion ddl/delete_range.go
@@ -23,6 +23,7 @@ import (
"sync"

"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/kv"
@@ -301,7 +302,11 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context,
return errors.Trace(err)
}
}
return nil
// The logical table may contain global index regions, so delete the logical table range as well.
startKey = tablecodec.EncodeTablePrefix(tableID)
endKey := tablecodec.EncodeTablePrefix(tableID + 1)
elemID := ea.allocForPhysicalID(tableID)
return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID))
}
startKey = tablecodec.EncodeTablePrefix(tableID)
endKey := tablecodec.EncodeTablePrefix(tableID + 1)
@@ -364,7 +369,14 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context,
if err := job.DecodeArgs(&indexName, &ifExists, &indexID, &partitionIDs); err != nil {
return errors.Trace(err)
}

// len(partitionIDs) is 0 if the dropped index is a global index, even on a partitioned table.
if len(partitionIDs) > 0 {
failpoint.Inject("checkDropGlobalIndex", func(val failpoint.Value) {
if val.(bool) {
panic("drop global index must not delete partition index range")
}
})
for _, pid := range partitionIDs {
startKey := tablecodec.EncodeTableIndexPrefix(pid, indexID)
endKey := tablecodec.EncodeTableIndexPrefix(pid, indexID+1)
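The added branch above gives a dropped or truncated partitioned table one extra delete range spanning the whole logical table, because global index entries live under the logical table ID rather than under each partition's physical ID. Below is a minimal standalone sketch of that key layout; the readable "t{id}" strings stand in for the binary prefixes that tablecodec.EncodeTablePrefix actually produces, and all IDs are hypothetical.

// Standalone sketch of the key layout behind the change above. Partition data
// (and local indexes) live under each partition's physical ID, while a global
// index lives under the logical table ID, so dropping the table now needs the
// extra table-level range.
package main

import "fmt"

func main() {
	tableID := int64(100)                  // logical table ID (hypothetical)
	partitionIDs := []int64{101, 102, 103} // physical partition IDs (hypothetical)

	// One delete range per partition: covers row data and local index keys.
	for _, pid := range partitionIDs {
		fmt.Printf("partition range:     [t%d, t%d)\n", pid, pid+1)
	}

	// The extra range on the logical table prefix: the only range that covers
	// global index keys, which are written under the t{tableID}_ prefix.
	fmt.Printf("logical table range: [t%d, t%d)\n", tableID, tableID+1)
}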
8 changes: 7 additions & 1 deletion ddl/index.go
@@ -1088,7 +1088,13 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
// The partition IDs were appended by convertAddIdxJob2RollbackJob. It is odd, but for compatibility,
// we should keep appending the partitions in convertAddIdxJob2RollbackJob.
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
job.Args = append(job.Args, indexInfo.ID, getPartitionIDs(tblInfo))
// A global index key carries the t{tableID}_ prefix.
// Pass an empty partitionIDs slice so insertJobIntoDeleteRangeTable uses that table-level prefix.
if indexInfo.Global {
job.Args = append(job.Args, indexInfo.ID, []int64{})
} else {
job.Args = append(job.Args, indexInfo.ID, getPartitionIDs(tblInfo))
}
}
default:
return ver, errors.Trace(dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State))
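onDropIndex and insertJobIntoDeleteRangeTable communicate through job.Args, which TiDB serializes as JSON. The sketch below mimics that handoff (decoding element by element, as model.Job.DecodeArgs does) only to show the effect of the new branch: a global index arrives at the delete-range step with zero partition IDs, so the table-level prefix is used. The helper names and IDs are illustrative, not TiDB internals.

// Sketch of the drop-index argument handoff, assuming only the shape visible
// in the two hunks above: (indexName, ifExists, indexID, partitionIDs).
package main

import (
	"encoding/json"
	"fmt"
)

func encodeArgs(indexID int64, partitionIDs []int64) []byte {
	raw, err := json.Marshal([]interface{}{"idx", false, indexID, partitionIDs})
	if err != nil {
		panic(err)
	}
	return raw
}

func decodeArgs(raw []byte) (indexID int64, partitionIDs []int64) {
	var elems []json.RawMessage
	if err := json.Unmarshal(raw, &elems); err != nil {
		panic(err)
	}
	var (
		indexName string
		ifExists  bool
	)
	for i, dst := range []interface{}{&indexName, &ifExists, &indexID, &partitionIDs} {
		if err := json.Unmarshal(elems[i], dst); err != nil {
			panic(err)
		}
	}
	return indexID, partitionIDs
}

func main() {
	// Global index: onDropIndex now appends an empty slice on purpose.
	id, pids := decodeArgs(encodeArgs(5, []int64{}))
	fmt.Printf("global index %d: %d partition ranges (table-level prefix used: %v)\n", id, len(pids), len(pids) == 0)

	// Local index on a partitioned table: one delete range per partition.
	id, pids = decodeArgs(encodeArgs(5, []int64{101, 102, 103}))
	fmt.Printf("local index %d: %d partition ranges\n", id, len(pids))
}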
2 changes: 1 addition & 1 deletion ddl/sanity_check.go
@@ -97,7 +97,7 @@ func expectedDeleteRangeCnt(ctx delRangeCntCtx, job *model.Job) (int, error) {
if err := job.DecodeArgs(&startKey, &physicalTableIDs, &ruleIDs); err != nil {
return 0, errors.Trace(err)
}
return mathutil.Max(len(physicalTableIDs), 1), nil
return len(physicalTableIDs) + 1, nil
case model.ActionDropTablePartition, model.ActionTruncateTablePartition,
model.ActionReorganizePartition:
var physicalTableIDs []int64
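The sanity check mirrors the delete-range change: dropping a table now always produces one range per physical partition plus the extra logical-table range, so the expected count becomes len(physicalTableIDs)+1 instead of max(len, 1). A quick sketch of the arithmetic, assuming (as the hunk suggests) that a non-partitioned table decodes to an empty ID list:

// Quick sketch of the adjusted accounting: every physical partition gets one
// delete range, and the logical table itself now always gets one more, which
// is the range that covers global index keys.
package main

import "fmt"

func expectedDeleteRangeCnt(physicalTableIDs []int64) int {
	return len(physicalTableIDs) + 1
}

func main() {
	fmt.Println(expectedDeleteRangeCnt(nil))                    // non-partitioned table: 1
	fmt.Println(expectedDeleteRangeCnt([]int64{101, 102, 103})) // 3 partitions: 3 + 1 = 4
}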
43 changes: 43 additions & 0 deletions executor/builder.go
@@ -3939,6 +3939,22 @@ func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLoo
}

if is.Index.Global {
tmp, ok := b.is.TableByID(ts.Table.ID)
if !ok {
b.err = err
return nil
}
tbl, ok1 := tmp.(table.PartitionedTable)
if !ok1 {
b.err = ErrBuildExecutor
return nil
}
ret.partitionIDMap, err = getPartitionIdsAfterPruning(b.ctx, tbl, &v.PartitionInfo)
if err != nil {
b.err = err
return nil
}

return ret
}
if ok, _ := is.IsPartition(); ok {
@@ -5141,6 +5157,33 @@ func partitionPruning(ctx sessionctx.Context, tbl table.PartitionedTable, conds
return ret, nil
}

func getPartitionIdsAfterPruning(ctx sessionctx.Context, tbl table.PartitionedTable, partInfo *plannercore.PartitionInfo) (map[int64]struct{}, error) {
if partInfo == nil {
return nil, errors.New("partInfo in getPartitionIdsAfterPruning must not be nil")
}
idxArr, err := plannercore.PartitionPruning(ctx, tbl, partInfo.PruningConds, partInfo.PartitionNames, partInfo.Columns, partInfo.ColumnNames)
if err != nil {
return nil, err
}

var ret map[int64]struct{}

pi := tbl.Meta().GetPartitionInfo()
if fullRangePartition(idxArr) {
ret = make(map[int64]struct{}, len(pi.Definitions))
for _, def := range pi.Definitions {
ret[def.ID] = struct{}{}
}
} else {
ret = make(map[int64]struct{}, len(idxArr))
for _, idx := range idxArr {
pid := pi.Definitions[idx].ID
ret[pid] = struct{}{}
}
}
return ret, nil
}

func fullRangePartition(idxArr []int) bool {
return len(idxArr) == 1 && idxArr[0] == plannercore.FullRange
}
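For a global index, buildIndexLookUpReader keeps the logical table as the scan target and instead records which partitions survived pruning in partitionIDMap. getPartitionIdsAfterPruning converts the pruning result, a list of indexes into the table's partition definitions where a single plannercore.FullRange sentinel means "every partition", into a set keyed by physical partition ID. A standalone sketch of that conversion; the sentinel value and IDs below are assumptions for illustration, not TiDB's constants.

// Standalone sketch of the core conversion in getPartitionIdsAfterPruning.
package main

import "fmt"

const fullRange = -1 // stand-in for plannercore.FullRange

func partitionIDSet(defIDs []int64, idxArr []int) map[int64]struct{} {
	ret := make(map[int64]struct{})
	if len(idxArr) == 1 && idxArr[0] == fullRange {
		// Pruning kept everything: include every partition definition.
		for _, id := range defIDs {
			ret[id] = struct{}{}
		}
		return ret
	}
	// Otherwise idxArr holds indexes into the definitions slice.
	for _, idx := range idxArr {
		ret[defIDs[idx]] = struct{}{}
	}
	return ret
}

func main() {
	defIDs := []int64{101, 102, 103} // physical IDs of p0, p1, p2 (hypothetical)
	fmt.Println(partitionIDSet(defIDs, []int{fullRange})) // all three partitions kept
	fmt.Println(partitionIDSet(defIDs, []int{0}))         // only p0 survives pruning
}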
6 changes: 6 additions & 0 deletions executor/distsql.go
@@ -373,6 +373,7 @@ type IndexLookUpExecutor struct {
// fields about accessing partition tables
partitionTableMode bool // if this executor is accessing a partition table
prunedPartitions []table.PhysicalTable // partition tables need to access
partitionIDMap map[int64]struct{} // partition IDs that the global index may access after pruning
partitionRangeMap map[int64][]*ranger.Range
partitionKVRanges [][]kv.KeyRange // kvRanges of each prunedPartitions

@@ -990,6 +991,11 @@ func (w *indexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk,
if err != nil {
return handles, retChk, err
}
if ph, ok := h.(kv.PartitionHandle); ok {
if _, exist := w.idxLookup.partitionIDMap[ph.PartitionID]; !exist {
continue
}
}
handles = append(handles, h)
}
if w.checkIndexValue != nil {
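With the map in place, the index worker can drop handles early: rows read from a global index come back as kv.PartitionHandle values, and any handle whose partition ID is not in partitionIDMap is skipped before the table-lookup stage, which is what makes a query such as `select ... from p partition(p0)` correct over a global index. A simplified, self-contained version of that filter follows; the types are stand-ins, not TiDB's.

// Sketch of the new filter in extractTaskHandles, with a plain struct in
// place of kv.PartitionHandle.
package main

import "fmt"

type partitionHandle struct {
	partitionID int64
	rowID       int64
}

func filterHandles(handles []partitionHandle, kept map[int64]struct{}) []partitionHandle {
	out := handles[:0]
	for _, h := range handles {
		if _, ok := kept[h.partitionID]; !ok {
			continue // partition was pruned away; skip the table lookup for this row
		}
		out = append(out, h)
	}
	return out
}

func main() {
	kept := map[int64]struct{}{101: {}} // only p0 selected
	handles := []partitionHandle{{101, 1}, {102, 3}, {103, 7}}
	fmt.Println(filterHandles(handles, kept)) // [{101 1}]
}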
100 changes: 88 additions & 12 deletions executor/partition_table_test.go
@@ -3164,6 +3164,28 @@ partition p2 values less than (10))`)
tk.MustQuery("select * from p use index (idx)").Sort().Check(testkit.Rows("1 3", "3 4", "5 6", "7 9"))
}

func TestDropGlobalIndex(t *testing.T) {
store := testkit.CreateMockStore(t)

tk := testkit.NewTestKit(t, store)
restoreConfig := config.RestoreFunc()
defer restoreConfig()
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})
tk.MustExec("use test")
tk.MustExec("drop table if exists p")
tk.MustExec(`create table p (id int, c int) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
tk.MustExec("alter table p add unique idx(id)")

failpoint.Enable("github.com/pingcap/tidb/ddl/checkDropGlobalIndex", `return(true)`)
tk.MustExec("alter table p drop index idx")
failpoint.Disable("github.com/pingcap/tidb/ddl/checkDropGlobalIndex")
}

func TestIssue20028(t *testing.T) {
store := testkit.CreateMockStore(t)

@@ -3916,21 +3938,75 @@ func TestIssue35181(t *testing.T) {
}

func TestIssue21732(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
restoreConfig := config.RestoreFunc()
defer restoreConfig()
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})

store := testkit.CreateMockStore(t)

tk := testkit.NewTestKit(t, store)
for _, mode := range []variable.PartitionPruneMode{variable.StaticOnly, variable.DynamicOnly} {
testkit.WithPruneMode(tk, mode, func() {
tk.MustExec("create database TestIssue21732")
tk.MustExec("use TestIssue21732")
tk.MustExec("drop table if exists p")
tk.MustExec(`create table p (a int, b int GENERATED ALWAYS AS (3*a-2*a) VIRTUAL) partition by hash(b) partitions 2;`)
tk.MustExec("alter table p add unique index idx (a, b);")
tk.MustExec("insert into p (a) values (1),(2),(3);")
tk.MustExec("select * from p ignore index (idx);")
tk.MustQuery("select * from p use index (idx)").Sort().Check(testkit.Rows("1 1", "2 2", "3 3"))
tk.MustExec("drop database TestIssue21732")
})
tk.MustExec("create database TestIssue21732")
tk.MustExec("use TestIssue21732")
tk.MustExec("drop table if exists p")
tk.MustExec(`create table p (a int, b int GENERATED ALWAYS AS (3*a-2*a) VIRTUAL) partition by hash(b) partitions 2;`)
tk.MustExec("alter table p add unique index idx (a);")
tk.MustExec("insert into p (a) values (1),(2),(3);")
tk.MustQuery("select * from p use index (idx)").Sort().Check(testkit.Rows("1 1", "2 2", "3 3"))
tk.MustExec("drop database TestIssue21732")
}

func TestGlobalIndexSelectSpecifiedPartition(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
restoreConfig := config.RestoreFunc()
defer restoreConfig()
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})

store := testkit.CreateMockStore(t)

tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists p")
tk.MustExec(`create table p (id int, c int) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
tk.MustExec("alter table p add unique idx(id)")
tk.MustExec("insert into p values (1,3), (3,4), (5,6), (7,9)")
tk.MustQuery("select * from p partition(p0) use index (idx)").Sort().Check(testkit.Rows("1 3"))
}

func TestGlobalIndexForIssue40149(t *testing.T) {
restoreConfig := config.RestoreFunc()
defer restoreConfig()
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})

store := testkit.CreateMockStore(t)

tk := testkit.NewTestKit(t, store)
for _, opt := range []string{"true", "false"} {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(`+opt+`)`)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_t1")
tk.MustExec(`CREATE TABLE test_t1 (
a int(11) NOT NULL,
b int(11) DEFAULT NULL,
c int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY RANGE (c) (
PARTITION p0 VALUES LESS THAN (10),
PARTITION p1 VALUES LESS THAN (MAXVALUE));`)
tk.MustExec("alter table test_t1 add unique p_a (a);")
tk.MustExec("insert into test_t1 values (1,1,1);")
tk.MustQuery("select * from test_t1 where a = 1;").Sort().Check(testkit.Rows("1 1 1"))
failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
}
}

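TestDropGlobalIndex relies on the checkDropGlobalIndex failpoint added in delete_range.go: with the failpoint returning true, the DDL panics if dropping a global index ever tries to delete per-partition index ranges. In TiDB sources the failpoint.Inject marker is rewritten into failpoint.Eval calls at build time (by failpoint-ctl), so the standalone sketch below calls Eval directly; the failpoint name and guarded function are illustrative, not the real TiDB path.

// Sketch of the enable/evaluate/disable round trip used by the test above.
package main

import (
	"fmt"

	"github.com/pingcap/failpoint"
)

const fpName = "example/checkDropGlobalIndex" // illustrative name, not the TiDB path

func dropPartitionIndexRanges() {
	// Guard mirroring the one in insertJobIntoDeleteRangeTable: panic if the
	// per-partition path is taken while the failpoint is enabled.
	if val, err := failpoint.Eval(fpName); err == nil {
		if enabled, ok := val.(bool); ok && enabled {
			panic("drop global index must not delete partition index range")
		}
	}
	fmt.Println("deleting per-partition index ranges")
}

func main() {
	dropPartitionIndexRanges() // failpoint disabled: normal path

	if err := failpoint.Enable(fpName, `return(true)`); err != nil {
		panic(err)
	}
	defer func() {
		_ = failpoint.Disable(fpName)
		if r := recover(); r != nil {
			fmt.Println("failpoint fired:", r)
		}
	}()
	dropPartitionIndexRanges() // failpoint enabled: the guard panics
}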
95 changes: 48 additions & 47 deletions planner/core/logical_plan_builder.go
@@ -4524,53 +4524,6 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as
if tblName.L == "" {
tblName = tn.Name
}
possiblePaths, err := getPossibleAccessPaths(b.ctx, b.TableHints(), tn.IndexHints, tbl, dbName, tblName, b.isForUpdateRead, b.is.SchemaMetaVersion())
if err != nil {
return nil, err
}

if tableInfo.IsView() {
if tn.TableSample != nil {
return nil, expression.ErrInvalidTableSample.GenWithStackByArgs("Unsupported TABLESAMPLE in views")
}

// Get the hints belong to the current view.
currentQBNameMap4View := make(map[string][]ast.HintTable)
currentViewHints := make(map[string][]*ast.TableOptimizerHint)
for qbName, viewQBNameHintTable := range b.hintProcessor.QbNameMap4View {
if len(viewQBNameHintTable) == 0 {
continue
}
viewSelectOffset := b.getSelectOffset()

var viewHintSelectOffset int
if viewQBNameHintTable[0].QBName.L == "" {
// If we do not explicit set the qbName, we will set the empty qb name to @sel_1.
viewHintSelectOffset = 1
} else {
viewHintSelectOffset = b.hintProcessor.GetHintOffset(viewQBNameHintTable[0].QBName, viewSelectOffset)
}

// Check whether the current view can match the view name in the hint.
if viewQBNameHintTable[0].TableName.L == tblName.L && viewHintSelectOffset == viewSelectOffset {
// If the view hint can match the current view, we pop the first view table in the query block hint's table list.
// It means the hint belong the current view, the first view name in hint is matched.
// Because of the nested views, so we should check the left table list in hint when build the data source from the view inside the current view.
currentQBNameMap4View[qbName] = viewQBNameHintTable[1:]
currentViewHints[qbName] = b.hintProcessor.QbHints4View[qbName]
b.hintProcessor.QbNameUsed4View[qbName] = struct{}{}
}
}
return b.BuildDataSourceFromView(ctx, dbName, tableInfo, currentQBNameMap4View, currentViewHints)
}

if tableInfo.IsSequence() {
if tn.TableSample != nil {
return nil, expression.ErrInvalidTableSample.GenWithStackByArgs("Unsupported TABLESAMPLE in sequences")
}
// When the source is a Sequence, we convert it to a TableDual, as what most databases do.
return b.buildTableDual(), nil
}

if tableInfo.GetPartitionInfo() != nil {
// If `UseDynamicPruneMode` is already false, we don't need to check whether `flagPartitionProcessor` should be applied
@@ -4624,6 +4577,54 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as
return nil, ErrPartitionClauseOnNonpartitioned
}

possiblePaths, err := getPossibleAccessPaths(b.ctx, b.TableHints(), tn.IndexHints, tbl, dbName, tblName, b.isForUpdateRead, b.optFlag&flagPartitionProcessor > 0)
if err != nil {
return nil, err
}

if tableInfo.IsView() {
if tn.TableSample != nil {
return nil, expression.ErrInvalidTableSample.GenWithStackByArgs("Unsupported TABLESAMPLE in views")
}

// Get the hints belong to the current view.
currentQBNameMap4View := make(map[string][]ast.HintTable)
currentViewHints := make(map[string][]*ast.TableOptimizerHint)
for qbName, viewQBNameHintTable := range b.hintProcessor.QbNameMap4View {
if len(viewQBNameHintTable) == 0 {
continue
}
viewSelectOffset := b.getSelectOffset()

var viewHintSelectOffset int
if viewQBNameHintTable[0].QBName.L == "" {
// If we do not explicit set the qbName, we will set the empty qb name to @sel_1.
viewHintSelectOffset = 1
} else {
viewHintSelectOffset = b.hintProcessor.GetHintOffset(viewQBNameHintTable[0].QBName, viewSelectOffset)
}

// Check whether the current view can match the view name in the hint.
if viewQBNameHintTable[0].TableName.L == tblName.L && viewHintSelectOffset == viewSelectOffset {
// If the view hint can match the current view, we pop the first view table in the query block hint's table list.
// It means the hint belong the current view, the first view name in hint is matched.
// Because of the nested views, so we should check the left table list in hint when build the data source from the view inside the current view.
currentQBNameMap4View[qbName] = viewQBNameHintTable[1:]
currentViewHints[qbName] = b.hintProcessor.QbHints4View[qbName]
b.hintProcessor.QbNameUsed4View[qbName] = struct{}{}
}
}
return b.BuildDataSourceFromView(ctx, dbName, tableInfo, currentQBNameMap4View, currentViewHints)
}

if tableInfo.IsSequence() {
if tn.TableSample != nil {
return nil, expression.ErrInvalidTableSample.GenWithStackByArgs("Unsupported TABLESAMPLE in sequences")
}
// When the source is a Sequence, we convert it to a TableDual, as what most databases do.
return b.buildTableDual(), nil
}

// Retain the TiKV access path so a point-get access path can be generated if one exists
// see detail in issue: https://github.com/pingcap/tidb/issues/39543
if !(b.isForUpdateRead && b.ctx.GetSessionVars().TxnCtx.IsExplicit) {
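The getPossibleAccessPaths call (together with the view and sequence handling that follows it) moves below the partition handling, and its last argument is now `b.optFlag&flagPartitionProcessor > 0`, a plain bitmask membership test on the optimizer flag set. A minimal sketch of that idiom; the flag names and values here are illustrative, not TiDB's actual constants.

// Minimal sketch of the bitmask test in the relocated call: each
// logical-optimization rule corresponds to one bit of optFlag, and
// optFlag&flagPartitionProcessor > 0 asks whether the partition-processor
// rule is in effect.
package main

import "fmt"

const (
	flagPrunColumns        uint64 = 1 << iota // 1 (illustrative values)
	flagPartitionProcessor                    // 2
	flagPredicatePushDown                     // 4
)

func main() {
	optFlag := flagPrunColumns | flagPartitionProcessor
	// In Go, & binds tighter than >, so no extra parentheses are needed.
	fmt.Println(optFlag&flagPartitionProcessor > 0) // true
	fmt.Println(optFlag&flagPredicatePushDown > 0)  // false
}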