diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 973f36fbc6ab3..ed57ee43f6ca0 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -1540,16 +1540,16 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, } } if candidate.isMatchProp { - if cop.tablePlan != nil && !ds.tableInfo.IsCommonHandle { - col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds) - cop.extraHandleCol = col - cop.needExtraProj = cop.needExtraProj || isNew - } cop.keepOrder = true // IndexScan on partition table can't keep order. if ds.tableInfo.GetPartitionInfo() != nil { return invalidTask, nil } + if cop.tablePlan != nil && !ds.tableInfo.IsCommonHandle { + col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds) + cop.extraHandleCol = col + cop.needExtraProj = cop.needExtraProj || isNew + } } if cop.needExtraProj { cop.originSchema = ds.schema diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go index f50a04a796a7e..b3d23a5f9c692 100644 --- a/planner/core/plan_test.go +++ b/planner/core/plan_test.go @@ -1024,6 +1024,17 @@ func TestIssue34863(t *testing.T) { tk.MustQuery("select count(o.c_id) from c right join o on c.c_id=o.c_id;").Check(testkit.Rows("5")) } +func TestIssue40857(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + tk.MustExec("drop table if exists t;") + tk.MustExec("CREATE TABLE t (c1 mediumint(9) DEFAULT '-4747160',c2 year(4) NOT NULL DEFAULT '2075',c3 double DEFAULT '1.1559030660251948',c4 enum('wbv4','eli','d8ym','m3gsx','lz7td','o','d1k7l','y1x','xcxq','bj','n7') DEFAULT 'xcxq',c5 int(11) DEFAULT '255080866',c6 tinyint(1) DEFAULT '1',PRIMARY KEY (c2),KEY `c4d86d54-091c-4307-957b-b164c9652b7f` (c6,c4) );") + tk.MustExec("insert into t values (-4747160, 2075, 722.5719203870632, 'xcxq', 1576824797, 1);") + tk.MustExec("select /*+ stream_agg() */ bit_or(t.c5) as r0 from t where t.c3 in (select c6 from t where not(t.c6 <> 1) and not(t.c3 in(9263.749352636818))) group by t.c1;") + require.Empty(t, tk.Session().LastMessage()) +} + func TestCloneFineGrainedShuffleStreamCount(t *testing.T) { window := &core.PhysicalWindow{} newPlan, err := window.Clone() diff --git a/planner/core/task.go b/planner/core/task.go index cfdadc3d29e05..c4756deced98c 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1918,9 +1918,9 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { t := tasks[0].copy() if cop, ok := t.(*copTask); ok { // We should not push agg down across double read, since the data of second read is ordered by handle instead of index. - // The `extraHandleCol` is added if the double read needs to keep order. So we just use it to decided - // whether the following plan is double read with order reserved. - if cop.extraHandleCol != nil || len(cop.rootTaskConds) > 0 || len(cop.idxMergePartPlans) > 0 { + // We use (cop.indexPlan != nil && cop.tablePlan != nil && cop.keepOrder) to decide whether the following plan is double + // read with order reserved. + if (cop.indexPlan != nil && cop.tablePlan != nil && cop.keepOrder) || len(cop.rootTaskConds) > 0 || len(cop.idxMergePartPlans) > 0 { t = cop.convertToRootTask(p.ctx) attachPlan2Task(p, t) } else {