From 993892861a53dd817d68898157aec090752636ae Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Wed, 4 Jan 2023 09:18:15 +0000 Subject: [PATCH 1/5] planner, executor: split the range for unsigned pk of partition table when limit is used --- executor/partition_table_test.go | 11 +++++++++++ executor/table_reader.go | 5 +++++ planner/core/task.go | 5 ++++- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 5696b56f6f730..79e710d7e2e5b 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -637,6 +637,17 @@ func TestOrderByandLimit(t *testing.T) { } } +func TestOrderByOnUnsignedPk(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6") + tk.MustExec("insert into tunsigned_hash values(25), (9279808998424041135)") + tk.MustQuery("select min(a) from tunsigned_hash").Check(testkit.Rows("25")) + tk.MustQuery("select max(a) from tunsigned_hash").Check(testkit.Rows("9279808998424041135")) +} + func TestBatchGetandPointGetwithHashPartition(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/executor/table_reader.go b/executor/table_reader.go index 984212dcf7328..68079e00346e5 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -17,6 +17,7 @@ package executor import ( "bytes" "context" + "fmt" "time" "github.com/opentracing/opentracing-go" @@ -40,6 +41,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/stringutil" "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" "golang.org/x/exp/slices" ) @@ -186,6 +188,9 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error { } firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) + if !e.ctx.GetSessionVars().InRestrictedSQL { + logutil.BgLogger().Warn("build table ranges", zap.String("first part", fmt.Sprintf("%v", firstPartRanges)), zap.String("second part", fmt.Sprintf("%v", secondPartRanges))) + } // Treat temporary table as dummy table, avoid sending distsql request to TiKV. // Calculate the kv ranges here, UnionScan rely on this kv ranges. // cached table and temporary table are similar diff --git a/planner/core/task.go b/planner/core/task.go index c595880c9a37b..5aeee5d4dd7a6 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1075,7 +1075,8 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo if !propMatched { return nil, false } - + // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. + idxScan.KeepOrder = true idxScan.Desc = isDesc childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count @@ -1103,6 +1104,8 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo } } tblScan.Desc = isDesc + // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. 
+ tblScan.KeepOrder = true childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count stats := deriveLimitStats(childProfile, float64(newCount)) From d50bb7cf743265f59f9eff41668243afd18b5a40 Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Wed, 4 Jan 2023 09:24:38 +0000 Subject: [PATCH 2/5] remove debug log and add comments --- executor/table_reader.go | 5 ----- planner/core/task.go | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/executor/table_reader.go b/executor/table_reader.go index 68079e00346e5..984212dcf7328 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -17,7 +17,6 @@ package executor import ( "bytes" "context" - "fmt" "time" "github.com/opentracing/opentracing-go" @@ -41,7 +40,6 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/stringutil" "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" "golang.org/x/exp/slices" ) @@ -188,9 +186,6 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error { } firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) - if !e.ctx.GetSessionVars().InRestrictedSQL { - logutil.BgLogger().Warn("build table ranges", zap.String("first part", fmt.Sprintf("%v", firstPartRanges)), zap.String("second part", fmt.Sprintf("%v", secondPartRanges))) - } // Treat temporary table as dummy table, avoid sending distsql request to TiKV. // Calculate the kv ranges here, UnionScan rely on this kv ranges. // cached table and temporary table are similar diff --git a/planner/core/task.go b/planner/core/task.go index 5aeee5d4dd7a6..4613017be8423 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1104,7 +1104,7 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo } } tblScan.Desc = isDesc - // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. + // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func and the struct tableResultHandler for more details. tblScan.KeepOrder = true childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count From 5dd10adae62e36ba3ab4609e4070319433fcb89a Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Wed, 4 Jan 2023 18:04:35 +0000 Subject: [PATCH 3/5] one way to fix --- executor/builder.go | 2 +- executor/table_reader.go | 10 +++++----- planner/cascades/implementation_rules.go | 2 +- planner/core/exhaust_physical_plans.go | 8 ++++++-- planner/core/explain.go | 2 +- planner/core/find_best_task.go | 22 +++++++++++++++++++--- planner/core/initialize.go | 2 +- planner/core/physical_plans.go | 2 +- planner/core/plan_to_pb.go | 2 +- planner/core/task.go | 4 +--- 10 files changed, 37 insertions(+), 19 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index 137fca5f0b8f5..39b57f530c94e 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4325,7 +4325,7 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T SetDAGRequest(e.dagPB). SetStartTS(startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder). + SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). 
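The builder change above is the crux of PATCH 3: the executor's keepOrder field becomes a tri-state, and only the strongest state is translated into the coprocessor request's keep-order flag. A minimal, self-contained sketch of that mapping (the two helper functions are illustrative assumptions, not TiDB's actual API):

package main

import "fmt"

// KeepOrderTypeForTableReader mirrors the tri-state introduced in this patch.
type KeepOrderTypeForTableReader uint

const (
	NoOrder KeepOrderTypeForTableReader = iota // no ordering requirement
	KeepOrderInRequest // each request reads its own data in order
	KeepOrderBetweenRequest // ordering must also hold across requests
)

// requestKeepOrder is a hypothetical helper: the kv-request-level flag is only
// needed when ordering must hold across requests, which is what
// SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest) above expresses.
func requestKeepOrder(k KeepOrderTypeForTableReader) bool {
	return k == KeepOrderBetweenRequest
}

// needBoundarySplit is a hypothetical helper: any ordered read of an integer
// handle must split ranges at the Int64 boundary, which is what
// e.keepOrder != plannercore.NoOrder expresses in table_reader.go below.
func needBoundarySplit(k KeepOrderTypeForTableReader) bool {
	return k != NoOrder
}

func main() {
	for _, k := range []KeepOrderTypeForTableReader{NoOrder, KeepOrderInRequest, KeepOrderBetweenRequest} {
		fmt.Printf("state=%d requestKeepOrder=%v needBoundarySplit=%v\n", k, requestKeepOrder(k), needBoundarySplit(k))
	}
}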
diff --git a/executor/table_reader.go b/executor/table_reader.go index 984212dcf7328..bf774d73b5406 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -103,7 +103,7 @@ type TableReaderExecutor struct { memTracker *memory.Tracker selectResultHook // for testing - keepOrder bool + keepOrder plannercore.KeepOrderTypeForTableReader desc bool paging bool storeType kv.StoreType @@ -184,7 +184,7 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error { e.feedback.Invalidate() } } - firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) + firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder != plannercore.NoOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) // Treat temporary table as dummy table, avoid sending distsql request to TiKV. // Calculate the kv ranges here, UnionScan rely on this kv ranges. @@ -343,7 +343,7 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [ SetDAGRequest(e.dagPB). SetStartTS(e.startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder). + SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). @@ -384,7 +384,7 @@ func (e *TableReaderExecutor) buildKVReqForPartitionTableScan(ctx context.Contex SetDAGRequest(e.dagPB). SetStartTS(e.startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder). + SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). @@ -432,7 +432,7 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R SetDAGRequest(e.dagPB). SetStartTS(e.startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder). + SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). 
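Why the split above matters: unsigned handles are stored as their int64 bit pattern, so every value above MaxInt64 compares as negative and is physically placed before [0, MaxInt64]. A rough, self-contained sketch of the idea behind distsql.SplitRangesAcrossInt64Boundary follows (an assumption-level illustration, not the real implementation, which operates on ranger.Range values):

package main

import (
	"fmt"
	"math"
)

// splitAtInt64Boundary splits an unsigned handle range into the two halves as
// they are physically stored. For an ascending ordered scan, [lo, MaxInt64]
// must be read first; for a descending scan, the order flips.
func splitAtInt64Boundary(lo, hi uint64, desc bool) [][2]uint64 {
	if lo > math.MaxInt64 || hi <= math.MaxInt64 {
		return [][2]uint64{{lo, hi}} // the range does not cross the boundary
	}
	first := [2]uint64{lo, math.MaxInt64}
	second := [2]uint64{math.MaxInt64 + 1, hi}
	if desc {
		return [][2]uint64{second, first}
	}
	return [][2]uint64{first, second}
}

func main() {
	// 9279808998424041135 (the value from the test in PATCH 1) exceeds MaxInt64,
	// so its int64 bit pattern is negative and would sort before 25 without the split.
	u := uint64(9279808998424041135)
	fmt.Println(int64(u) < 0) // true
	fmt.Println(splitAtInt64Boundary(0, math.MaxUint64, false))
}

This is exactly the failure mode the PATCH 1 test pins down: without the split, min(a) and max(a) on tunsigned_hash come back from the wrong half of the keyspace.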
diff --git a/planner/cascades/implementation_rules.go b/planner/cascades/implementation_rules.go
index a1d8ff0ab4c1c..94abb856a0513 100644
--- a/planner/cascades/implementation_rules.go
+++ b/planner/cascades/implementation_rules.go
@@ -198,7 +198,7 @@ func (*ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.Physic
 	logicalScan := expr.ExprNode.(*plannercore.LogicalTableScan)
 	ts := logicalScan.GetPhysicalScan(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt))
 	if !reqProp.IsSortItemEmpty() {
-		ts.KeepOrder = true
+		ts.KeepOrder = plannercore.KeepOrderBetweenRequest
 		ts.Desc = reqProp.SortItems[0].Desc
 	}
 	tblCols, tblColHists := logicalScan.Source.TblCols, logicalScan.Source.TblColHists
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index dfd8d9572db81..88f4e162c44b4 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -983,6 +983,10 @@ func (p *LogicalJoin) constructInnerTableScanTask(
 	if keepOrder && ds.tableInfo.GetPartitionInfo() != nil {
 		return nil
 	}
+	var keepOrderStatus = NoOrder
+	if keepOrder {
+		keepOrderStatus = KeepOrderBetweenRequest
+	}
 	ts := PhysicalTableScan{
 		Table: ds.tableInfo,
 		Columns: ds.Columns,
@@ -991,7 +995,7 @@
 		filterCondition: ds.pushedDownConds,
 		Ranges: ranges,
 		rangeInfo: rangeInfo,
-		KeepOrder: keepOrder,
+		KeepOrder: keepOrderStatus,
 		Desc: desc,
 		physicalTableID: ds.physicalTableID,
 		isPartition: ds.isPartition,
@@ -1025,7 +1029,7 @@
 		tablePlan: ts,
 		indexPlanFinished: true,
 		tblColHists: ds.TblColHists,
-		keepOrder: ts.KeepOrder,
+		keepOrder: ts.KeepOrder == KeepOrderBetweenRequest,
 	}
 	copTask.partitionInfo = PartitionInfo{
 		PruningConds: ds.allConds,
diff --git a/planner/core/explain.go b/planner/core/explain.go
index 16140495de3e7..7ccba1eb2f26d 100644
--- a/planner/core/explain.go
+++ b/planner/core/explain.go
@@ -205,7 +205,7 @@ func (p *PhysicalTableScan) OperatorInfo(normalized bool) string {
 		}
 	}
 	buffer.WriteString("keep order:")
-	buffer.WriteString(strconv.FormatBool(p.KeepOrder))
+	buffer.WriteString(strconv.FormatBool(p.KeepOrder == KeepOrderBetweenRequest))
 	if p.Desc {
 		buffer.WriteString(", desc")
 	}
diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index aff1c29997fbd..a658e7e0e6833 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -42,6 +42,22 @@ import (
 	"go.uber.org/zap"
 )
 
+// We need three states for the KEEP ORDER property of the table reader.
+// It's a rather strange problem.
+// At the very beginning, TiKV stored the unsigned pk in the wrong order: [MaxInt64+1, MaxUint64] comes first, followed by [0, MaxInt64].
+// So if we just KEEP ORDER directly, we get the data of [MaxInt64+1, MaxUint64] first, then [0, MaxInt64].
+// We need to handle this case specially.
+// Then we have the func pushTopNDownToDynamicPartition. It pushes part of the order property down to the partition table.
+// It does not need KEEP ORDER between requests; it only needs to ensure that each request itself reads the data in the correct order.
+// So we introduce the third type, KeepOrderInRequest.
+type KeepOrderTypeForTableReader uint
+
+const (
+	NoOrder KeepOrderTypeForTableReader = iota // no ordering requirement
+	KeepOrderInRequest // each request reads its own data in order
+	KeepOrderBetweenRequest // ordering must also hold across requests
+)
+
 const (
 	// SelectionFactor is the default factor of the selectivity.
// For example, If we have no idea how to estimate the selectivity @@ -1976,7 +1992,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid return invalidTask, nil } ts, _ := ds.getOriginalPhysicalTableScan(prop, candidate.path, candidate.isMatchProp) - if ts.KeepOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) { + if ts.KeepOrder != NoOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) { // TiFlash fast mode(https://github.com/pingcap/tidb/pull/35851) does not keep order in TableScan return invalidTask, nil } @@ -1998,7 +2014,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid isDisaggregatedTiFlashPath := config.GetGlobalConfig().DisaggregatedTiFlash && ts.StoreType == kv.TiFlash canMppConvertToRootForDisaggregatedTiFlash := isDisaggregatedTiFlashPath && prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed() if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash { - if ts.KeepOrder { + if ts.KeepOrder != NoOrder { return invalidTask, nil } if prop.MPPPartitionTp != property.AnyType || ts.isPartition { @@ -2352,7 +2368,7 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount) if isMatchProp { ts.Desc = prop.SortItems[0].Desc - ts.KeepOrder = true + ts.KeepOrder = KeepOrderBetweenRequest } return ts, rowCount } diff --git a/planner/core/initialize.go b/planner/core/initialize.go index 4a096b7b49204..ffe8ca9affbae 100644 --- a/planner/core/initialize.go +++ b/planner/core/initialize.go @@ -427,7 +427,7 @@ func (p *PhysicalTableReader) adjustReadReqType(ctx sessionctx.Context) { // and all table scans contained are not keepOrder, try to use batch cop. if len(tableScans) > 0 { for _, tableScan := range tableScans { - if tableScan.KeepOrder { + if !(tableScan.KeepOrder == NoOrder) { return } } diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 537b0826eb381..3d6f971f9c5f6 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -820,7 +820,7 @@ type PhysicalTableScan struct { // works on the whole partition table, and `isPartition` is not used. isPartition bool // KeepOrder is true, if sort data by scanning pkcol, - KeepOrder bool + KeepOrder KeepOrderTypeForTableReader Desc bool isChildOfIndexLookUp bool diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 71a7f11c09a65..f184d8c611751 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -187,7 +187,7 @@ func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) } tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns) tsExec.Desc = p.Desc - keepOrder := p.KeepOrder + keepOrder := !(p.KeepOrder == NoOrder) tsExec.KeepOrder = &keepOrder tsExec.IsFastScan = &(ctx.GetSessionVars().TiFlashFastScan) diff --git a/planner/core/task.go b/planner/core/task.go index 4613017be8423..af2eb3781898c 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1075,8 +1075,6 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo if !propMatched { return nil, false } - // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. 
- idxScan.KeepOrder = true idxScan.Desc = isDesc childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count @@ -1105,7 +1103,7 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo } tblScan.Desc = isDesc // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func and the struct tableResultHandler for more details. - tblScan.KeepOrder = true + tblScan.KeepOrder = KeepOrderInRequest childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count stats := deriveLimitStats(childProfile, float64(newCount)) From 54fa827db94287b828e1223546dba37fbf24921d Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Fri, 6 Jan 2023 07:48:30 +0000 Subject: [PATCH 4/5] Revert "one way to fix" This reverts commit 5dd10adae62e36ba3ab4609e4070319433fcb89a. --- executor/builder.go | 2 +- executor/table_reader.go | 10 +++++----- planner/cascades/implementation_rules.go | 2 +- planner/core/exhaust_physical_plans.go | 8 ++------ planner/core/explain.go | 2 +- planner/core/find_best_task.go | 22 +++------------------- planner/core/initialize.go | 2 +- planner/core/physical_plans.go | 2 +- planner/core/plan_to_pb.go | 2 +- planner/core/task.go | 4 +++- 10 files changed, 19 insertions(+), 37 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index 39b57f530c94e..137fca5f0b8f5 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4325,7 +4325,7 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T SetDAGRequest(e.dagPB). SetStartTS(startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). + SetKeepOrder(e.keepOrder). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). diff --git a/executor/table_reader.go b/executor/table_reader.go index bf774d73b5406..984212dcf7328 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -103,7 +103,7 @@ type TableReaderExecutor struct { memTracker *memory.Tracker selectResultHook // for testing - keepOrder plannercore.KeepOrderTypeForTableReader + keepOrder bool desc bool paging bool storeType kv.StoreType @@ -184,7 +184,7 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error { e.feedback.Invalidate() } } - firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder != plannercore.NoOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) + firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle) // Treat temporary table as dummy table, avoid sending distsql request to TiKV. // Calculate the kv ranges here, UnionScan rely on this kv ranges. @@ -343,7 +343,7 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [ SetDAGRequest(e.dagPB). SetStartTS(e.startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). + SetKeepOrder(e.keepOrder). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). @@ -384,7 +384,7 @@ func (e *TableReaderExecutor) buildKVReqForPartitionTableScan(ctx context.Contex SetDAGRequest(e.dagPB). SetStartTS(e.startTS). SetDesc(e.desc). - SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest). + SetKeepOrder(e.keepOrder). SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). 
@@ -432,7 +432,7 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R
 		SetDAGRequest(e.dagPB).
 		SetStartTS(e.startTS).
 		SetDesc(e.desc).
-		SetKeepOrder(e.keepOrder == plannercore.KeepOrderBetweenRequest).
+		SetKeepOrder(e.keepOrder).
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
 		SetIsStaleness(e.isStaleness).
diff --git a/planner/cascades/implementation_rules.go b/planner/cascades/implementation_rules.go
index 94abb856a0513..a1d8ff0ab4c1c 100644
--- a/planner/cascades/implementation_rules.go
+++ b/planner/cascades/implementation_rules.go
@@ -198,7 +198,7 @@ func (*ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.Physic
 	logicalScan := expr.ExprNode.(*plannercore.LogicalTableScan)
 	ts := logicalScan.GetPhysicalScan(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt))
 	if !reqProp.IsSortItemEmpty() {
-		ts.KeepOrder = plannercore.KeepOrderBetweenRequest
+		ts.KeepOrder = true
 		ts.Desc = reqProp.SortItems[0].Desc
 	}
 	tblCols, tblColHists := logicalScan.Source.TblCols, logicalScan.Source.TblColHists
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index 88f4e162c44b4..dfd8d9572db81 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -983,10 +983,6 @@ func (p *LogicalJoin) constructInnerTableScanTask(
 	if keepOrder && ds.tableInfo.GetPartitionInfo() != nil {
 		return nil
 	}
-	var keepOrderStatus = NoOrder
-	if keepOrder {
-		keepOrderStatus = KeepOrderBetweenRequest
-	}
 	ts := PhysicalTableScan{
 		Table: ds.tableInfo,
 		Columns: ds.Columns,
@@ -995,7 +991,7 @@
 		filterCondition: ds.pushedDownConds,
 		Ranges: ranges,
 		rangeInfo: rangeInfo,
-		KeepOrder: keepOrderStatus,
+		KeepOrder: keepOrder,
 		Desc: desc,
 		physicalTableID: ds.physicalTableID,
 		isPartition: ds.isPartition,
@@ -1029,7 +1025,7 @@
 		tablePlan: ts,
 		indexPlanFinished: true,
 		tblColHists: ds.TblColHists,
-		keepOrder: ts.KeepOrder == KeepOrderBetweenRequest,
+		keepOrder: ts.KeepOrder,
 	}
 	copTask.partitionInfo = PartitionInfo{
 		PruningConds: ds.allConds,
diff --git a/planner/core/explain.go b/planner/core/explain.go
index 7ccba1eb2f26d..16140495de3e7 100644
--- a/planner/core/explain.go
+++ b/planner/core/explain.go
@@ -205,7 +205,7 @@ func (p *PhysicalTableScan) OperatorInfo(normalized bool) string {
 		}
 	}
 	buffer.WriteString("keep order:")
-	buffer.WriteString(strconv.FormatBool(p.KeepOrder == KeepOrderBetweenRequest))
+	buffer.WriteString(strconv.FormatBool(p.KeepOrder))
 	if p.Desc {
 		buffer.WriteString(", desc")
 	}
diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index a658e7e0e6833..aff1c29997fbd 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -42,22 +42,6 @@ import (
 	"go.uber.org/zap"
 )
 
-// We need three states for the KEEP ORDER property of the table reader.
-// It's a rather strange problem.
-// At the very beginning, TiKV stored the unsigned pk in the wrong order: [MaxInt64+1, MaxUint64] comes first, followed by [0, MaxInt64].
-// So if we just KEEP ORDER directly, we get the data of [MaxInt64+1, MaxUint64] first, then [0, MaxInt64].
-// We need to handle this case specially.
-// Then we have the func pushTopNDownToDynamicPartition. It pushes part of the order property down to the partition table.
-// It does not need KEEP ORDER between requests; it only needs to ensure that each request itself reads the data in the correct order.
-// So we introduce the third type, KeepOrderInRequest.
-type KeepOrderTypeForTableReader uint
-
-const (
-	NoOrder KeepOrderTypeForTableReader = iota // no ordering requirement
-	KeepOrderInRequest // each request reads its own data in order
-	KeepOrderBetweenRequest // ordering must also hold across requests
-)
-
 const (
 	// SelectionFactor is the default factor of the selectivity.
 	// For example, If we have no idea how to estimate the selectivity
@@ -1992,7 +1976,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid
 		return invalidTask, nil
 	}
 	ts, _ := ds.getOriginalPhysicalTableScan(prop, candidate.path, candidate.isMatchProp)
-	if ts.KeepOrder != NoOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) {
+	if ts.KeepOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) {
 		// TiFlash fast mode(https://github.com/pingcap/tidb/pull/35851) does not keep order in TableScan
 		return invalidTask, nil
 	}
@@ -2014,7 +1998,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid
 	isDisaggregatedTiFlashPath := config.GetGlobalConfig().DisaggregatedTiFlash && ts.StoreType == kv.TiFlash
 	canMppConvertToRootForDisaggregatedTiFlash := isDisaggregatedTiFlashPath && prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed()
 	if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash {
-		if ts.KeepOrder != NoOrder {
+		if ts.KeepOrder {
 			return invalidTask, nil
 		}
 		if prop.MPPPartitionTp != property.AnyType || ts.isPartition {
@@ -2368,7 +2352,7 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper
 	ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount)
 	if isMatchProp {
 		ts.Desc = prop.SortItems[0].Desc
-		ts.KeepOrder = KeepOrderBetweenRequest
+		ts.KeepOrder = true
 	}
 	return ts, rowCount
 }
diff --git a/planner/core/initialize.go b/planner/core/initialize.go
index ffe8ca9affbae..4a096b7b49204 100644
--- a/planner/core/initialize.go
+++ b/planner/core/initialize.go
@@ -427,7 +427,7 @@ func (p *PhysicalTableReader) adjustReadReqType(ctx sessionctx.Context) {
 	// and all table scans contained are not keepOrder, try to use batch cop.
 	if len(tableScans) > 0 {
 		for _, tableScan := range tableScans {
-			if !(tableScan.KeepOrder == NoOrder) {
+			if tableScan.KeepOrder {
 				return
 			}
 		}
diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go
index 3d6f971f9c5f6..537b0826eb381 100644
--- a/planner/core/physical_plans.go
+++ b/planner/core/physical_plans.go
@@ -820,7 +820,7 @@ type PhysicalTableScan struct {
 	// works on the whole partition table, and `isPartition` is not used.
isPartition bool // KeepOrder is true, if sort data by scanning pkcol, - KeepOrder KeepOrderTypeForTableReader + KeepOrder bool Desc bool isChildOfIndexLookUp bool diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index f184d8c611751..71a7f11c09a65 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -187,7 +187,7 @@ func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) } tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns) tsExec.Desc = p.Desc - keepOrder := !(p.KeepOrder == NoOrder) + keepOrder := p.KeepOrder tsExec.KeepOrder = &keepOrder tsExec.IsFastScan = &(ctx.GetSessionVars().TiFlashFastScan) diff --git a/planner/core/task.go b/planner/core/task.go index a2494e7da4d44..86a7ec4c9d4d1 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1072,6 +1072,8 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo if !propMatched { return nil, false } + // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. + idxScan.KeepOrder = true idxScan.Desc = isDesc childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count @@ -1100,7 +1102,7 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo } tblScan.Desc = isDesc // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func and the struct tableResultHandler for more details. - tblScan.KeepOrder = KeepOrderInRequest + tblScan.KeepOrder = true childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count stats := deriveLimitStats(childProfile, float64(newCount)) From 0ada06e4f2ad8ac81a1d75bd861d47294fd8fd4b Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Fri, 6 Jan 2023 07:49:58 +0000 Subject: [PATCH 5/5] only need to set table scan's keep order --- planner/core/task.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/planner/core/task.go b/planner/core/task.go index 86a7ec4c9d4d1..d68806c5ef1d8 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1072,8 +1072,6 @@ func (p *PhysicalTopN) pushTopNDownToDynamicPartition(copTsk *copTask) (task, bo if !propMatched { return nil, false } - // SplitRangesAcrossInt64Boundary needs the KeepOrder flag. See that func for more details. - idxScan.KeepOrder = true idxScan.Desc = isDesc childProfile := copTsk.plan().statsInfo() newCount := p.Offset + p.Count
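With PATCH 5 applied, only the table scan's KeepOrder is set in pushTopNDownToDynamicPartition, and the comment kept from PATCH 2 points at tableResultHandler, which serves the two range parts one after the other. A rough sketch of that contract (assumed behavior for illustration, not TiDB's actual tableResultHandler):

package main

import "fmt"

// partIter yields the rows of one range part; each part is internally ordered.
type partIter struct{ rows []uint64 }

func (p *partIter) next() (uint64, bool) {
	if len(p.rows) == 0 {
		return 0, false
	}
	v := p.rows[0]
	p.rows = p.rows[1:]
	return v, true
}

// twoPartHandler drains the first part before touching the second. As long as
// each part is ordered and the parts are issued in key order, the
// concatenation is globally ordered, which is what the KeepOrder flag buys.
type twoPartHandler struct{ first, second partIter }

func (h *twoPartHandler) next() (uint64, bool) {
	if v, ok := h.first.next(); ok {
		return v, true
	}
	return h.second.next()
}

func main() {
	h := twoPartHandler{
		first:  partIter{rows: []uint64{25}},                  // [0, MaxInt64]
		second: partIter{rows: []uint64{9279808998424041135}}, // [MaxInt64+1, MaxUint64]
	}
	for v, ok := h.next(); ok; v, ok = h.next() {
		fmt.Println(v) // 25, then 9279808998424041135: ascending, as the PATCH 1 test expects
	}
}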