Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -380,10 +380,11 @@ public Cost visitPhysicalHashAggregate(
exprCost / 100 + inputStatistics.getRowCount() / beNumber,
inputStatistics.getRowCount() / beNumber, 0);
} else {
int factor = aggregate.getGroupByExpressions().isEmpty() ? 1 : beNumber;
// global
return Cost.of(context.getSessionVariable(), exprCost / 100
+ inputStatistics.getRowCount(),
inputStatistics.getRowCount(), 0);
return Cost.of(context.getSessionVariable(),
exprCost / 100 + inputStatistics.getRowCount() / factor,
inputStatistics.getRowCount() / factor, 0);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -244,6 +244,7 @@
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;

Expand Down Expand Up @@ -1182,6 +1183,7 @@ public PlanFragment visitPhysicalHashAggregate(
// 2. collect agg expressions and generate agg function to slot reference map
List<Slot> aggFunctionOutput = Lists.newArrayList();
ArrayList<FunctionCallExpr> execAggregateFunctions = Lists.newArrayListWithCapacity(outputExpressions.size());
AtomicBoolean hasPartialInAggFunc = new AtomicBoolean(false);
for (NamedExpression o : outputExpressions) {
if (o.containsType(AggregateExpression.class)) {
aggFunctionOutput.add(o.toSlot());
Expand All @@ -1191,10 +1193,16 @@ public PlanFragment visitPhysicalHashAggregate(
execAggregateFunctions.add(
(FunctionCallExpr) ExpressionTranslator.translate((AggregateExpression) c, context)
);
hasPartialInAggFunc.set(
((AggregateExpression) c).getAggregateParam().aggMode.productAggregateBuffer);
}
});
}
}
// An agg may have different functions, some product buffer, some product result.
// The criterion for passing it to the be stage is: as long as there is a product buffer function in agg,
// it must be isPartial
boolean isPartial = hasPartialInAggFunc.get();

// 3. generate output tuple
List<Slot> slotList = Lists.newArrayList();
Expand All @@ -1211,7 +1219,6 @@ public PlanFragment visitPhysicalHashAggregate(
aggFunOutputIds.add(slots.get(i).getId().asInt());
}
}
boolean isPartial = aggregate.getAggregateParam().aggMode.productAggregateBuffer;
AggregateInfo aggInfo = AggregateInfo.create(execGroupingExpressions, execAggregateFunctions,
aggFunOutputIds, isPartial, outputTupleDesc, aggregate.getAggPhase().toExec());
AggregationNode aggregationNode = new AggregationNode(context.nextPlanNodeId(),
Expand All @@ -1221,7 +1228,7 @@ public PlanFragment visitPhysicalHashAggregate(

aggregationNode.setNereidsId(aggregate.getId());
context.getNereidsIdToPlanNodeIdMap().put(aggregate.getId(), aggregationNode.getId());
if (!aggregate.getAggMode().isFinalPhase) {
if (isPartial) {
aggregationNode.unsetNeedsFinalize();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,8 @@ public void execute() {
// if break when running the loop above, the condition must be false.
if (curChildIndex == groupExpression.arity()) {
if (!calculateEnforce(requestChildrenProperties, outputChildrenProperties)) {
return; // if error exists, return
clear();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this a bug in the base code too? We should fix it in a separate PR and add some UT and regression cases for it.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

+1

continue; // if error exists, return
}
if (curTotalCost.getValue() < context.getCostUpperBound()) {
context.setCostUpperBound(curTotalCost.getValue());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,8 @@
import org.apache.doris.nereids.rules.rewrite.CreatePartitionTopNFromWindow;
import org.apache.doris.nereids.rules.rewrite.DecoupleEncodeDecode;
import org.apache.doris.nereids.rules.rewrite.DeferMaterializeTopNResult;
import org.apache.doris.nereids.rules.rewrite.DistinctAggStrategySelector;
import org.apache.doris.nereids.rules.rewrite.DistinctAggregateRewriter;
import org.apache.doris.nereids.rules.rewrite.DistinctWindowExpression;
import org.apache.doris.nereids.rules.rewrite.EliminateAggCaseWhen;
import org.apache.doris.nereids.rules.rewrite.EliminateAggregate;
Expand Down Expand Up @@ -153,7 +155,6 @@
import org.apache.doris.nereids.rules.rewrite.SimplifyWindowExpression;
import org.apache.doris.nereids.rules.rewrite.SkewJoin;
import org.apache.doris.nereids.rules.rewrite.SplitLimit;
import org.apache.doris.nereids.rules.rewrite.SplitMultiDistinct;
import org.apache.doris.nereids.rules.rewrite.SumLiteralRewrite;
import org.apache.doris.nereids.rules.rewrite.TransposeSemiJoinAgg;
import org.apache.doris.nereids.rules.rewrite.TransposeSemiJoinAggProject;
Expand Down Expand Up @@ -762,6 +763,7 @@ public class Rewriter extends AbstractBatchJobExecutor {
new PushDownFilterThroughProject(),
new MergeProjectable()
)),
topDown(DistinctAggregateRewriter.INSTANCE),
custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new),
topDown(new PushDownVectorTopNIntoOlapScan()),
topDown(new PushDownVirtualColumnsIntoOlapScan()),
Expand Down Expand Up @@ -881,8 +883,9 @@ private static List<RewriteJob> getWholeTreeRewriteJobs(
rewriteJobs.addAll(jobs(topic("or expansion",
custom(RuleType.OR_EXPANSION, () -> OrExpansion.INSTANCE))));
}

rewriteJobs.addAll(jobs(topic("split multi distinct",
custom(RuleType.SPLIT_MULTI_DISTINCT, () -> SplitMultiDistinct.INSTANCE))));
custom(RuleType.DISTINCT_AGG_STRATEGY_SELECTOR, () -> DistinctAggStrategySelector.INSTANCE))));

if (needSubPathPushDown) {
rewriteJobs.addAll(jobs(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
import org.apache.doris.nereids.properties.LogicalProperties;
import org.apache.doris.nereids.properties.PhysicalProperties;
import org.apache.doris.nereids.properties.RequestPropertyDeriver;
import org.apache.doris.nereids.properties.RequirePropertiesSupplier;
import org.apache.doris.nereids.rules.exploration.mv.AbstractMaterializedViewRule;
import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.plans.GroupPlan;
Expand Down Expand Up @@ -1013,7 +1012,6 @@ private List<List<PhysicalProperties>> extractInputProperties(GroupExpression gr
.filter(e -> e.stream().allMatch(PhysicalProperties.ANY::equals))
.findAny();
if (any.isPresent()
&& !(groupExpression.getPlan() instanceof RequirePropertiesSupplier)
&& !(groupExpression.getPlan() instanceof SetOperation)) {
res.clear();
res.add(any.get());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,16 @@
import org.apache.doris.nereids.jobs.JobContext;
import org.apache.doris.nereids.memo.GroupExpression;
import org.apache.doris.nereids.properties.DistributionSpecHash.ShuffleType;
import org.apache.doris.nereids.trees.expressions.AggregateExpression;
import org.apache.doris.nereids.trees.expressions.Alias;
import org.apache.doris.nereids.stats.StatsCalculator;
import org.apache.doris.nereids.trees.expressions.ExprId;
import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.SlotReference;
import org.apache.doris.nereids.trees.expressions.functions.agg.MultiDistinction;
import org.apache.doris.nereids.trees.plans.AggMode;
import org.apache.doris.nereids.trees.plans.GroupPlan;
import org.apache.doris.nereids.trees.plans.JoinType;
import org.apache.doris.nereids.trees.plans.Plan;
import org.apache.doris.nereids.trees.plans.SortPhase;
import org.apache.doris.nereids.trees.plans.physical.AbstractPhysicalSort;
import org.apache.doris.nereids.trees.plans.physical.PhysicalCTEConsumer;
import org.apache.doris.nereids.trees.plans.physical.PhysicalDistribute;
import org.apache.doris.nereids.trees.plans.physical.PhysicalFilter;
import org.apache.doris.nereids.trees.plans.physical.PhysicalHashAggregate;
Expand All @@ -47,26 +45,28 @@
import org.apache.doris.nereids.trees.plans.physical.PhysicalTopN;
import org.apache.doris.nereids.trees.plans.physical.PhysicalUnion;
import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
import org.apache.doris.nereids.util.AggregateUtils;
import org.apache.doris.nereids.util.JoinUtils;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.qe.SessionVariable;
import org.apache.doris.statistics.ColumnStatistic;
import org.apache.doris.statistics.Statistics;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
* ensure child add enough distribute. update children properties if we do regular.
* NOTICE: all visitor should call visit(plan, context) at proper place
* to process must shuffle except project and filter
*/
public class ChildrenPropertiesRegulator extends PlanVisitor<List<List<PhysicalProperties>>, Void> {

private final GroupExpression parent;
private final List<GroupExpression> children;
private final List<PhysicalProperties> originChildrenProperties;
Expand Down Expand Up @@ -110,74 +110,147 @@ public List<List<PhysicalProperties>> visitPhysicalHashAggregate(
if (agg.getGroupByExpressions().isEmpty() && agg.getOutputExpressions().isEmpty()) {
return ImmutableList.of();
}
// If the origin attribute satisfies the group by key but does not meet the requirements, ban the plan.
// e.g. select count(distinct a) from t group by b;
// requiredChildProperty: a
// but the child is already distributed by b
// ban this plan
PhysicalProperties originChildProperty = originChildrenProperties.get(0);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

add ut

PhysicalProperties requiredChildProperty = requiredProperties.get(0);
PhysicalProperties hashSpec = PhysicalProperties.createHash(agg.getGroupByExpressions(), ShuffleType.REQUIRE);
GroupExpression child = children.get(0);
if (child.getPlan() instanceof PhysicalDistribute) {
PhysicalProperties properties = new PhysicalProperties(
DistributionSpecAny.INSTANCE, originChildProperty.getOrderSpec());
Optional<Pair<Cost, GroupExpression>> pair = child.getOwnerGroup().getLowestCostPlan(properties);
// add null check
if (!pair.isPresent()) {
return ImmutableList.of();
}
GroupExpression distributeChild = pair.get().second;
PhysicalProperties distributeChildProperties = distributeChild.getOutputProperties(properties);
if (distributeChildProperties.satisfy(hashSpec)
&& !distributeChildProperties.satisfy(requiredChildProperty)) {
return ImmutableList.of();
}
}

if (!agg.getAggregateParam().canBeBanned) {
return visit(agg, context);
}
// forbid one phase agg on distribute
if (agg.getAggMode() == AggMode.INPUT_TO_RESULT && children.get(0).getPlan() instanceof PhysicalDistribute) {
// this means one stage gather agg, usually bad pattern
// return aggBanByStatistics(agg, context);
if (shouldBanOnePhaseAgg(agg, requiredChildProperty)) {
return ImmutableList.of();
}
// process must shuffle
return visit(agg, context);
}

// forbid TWO_PHASE_AGGREGATE_WITH_DISTINCT after shuffle
// TODO: this is forbid good plan after cte reuse by mistake
if (agg.getAggMode() == AggMode.INPUT_TO_BUFFER
&& requiredProperties.get(0).getDistributionSpec() instanceof DistributionSpecHash
&& children.get(0).getPlan() instanceof PhysicalDistribute) {
return ImmutableList.of();
/**
* 1. Generally, one-stage AGG is disabled unless the child of distribute is a CTE consumer.
* 2. If it is a CTE consumer, to avoid being banned, ensure that distribute is not a gather.
* Alternatively, if the distribute is a shuffle, ensure that the shuffle expr is not skewed.
* */
private boolean shouldBanOnePhaseAgg(PhysicalHashAggregate<? extends Plan> aggregate,
PhysicalProperties requiredChildProperty) {
if (banAggUnionAll(aggregate)) {
return true;
}
ConnectContext ctx = ConnectContext.get();
if (ctx != null && ctx.getSessionVariable().aggPhase == 1) {
return false;
}
if (!onePhaseAggWithDistribute(aggregate)) {
return false;
}
if (childIsCTEConsumer()) {
// shape is agg-distribute-CTEConsumer
// distribute is gather
if (requireGather(requiredChildProperty)) {
return true;
}
// group by key is skew
return skewOnShuffleExpr(aggregate);

// agg(group by x)-union all(A, B)
// no matter x.ndv is high or not, it is not worthwhile to shuffle A and B by x
// and hence we forbid one phase agg
if (agg.getAggMode() == AggMode.INPUT_TO_RESULT
&& children.get(0).getPlan() instanceof PhysicalUnion
&& !((PhysicalUnion) children.get(0).getPlan()).isDistinct()) {
return ImmutableList.of();
} else {
return true;
}
// forbid multi distinct opt that bad than multi-stage version when multi-stage can be executed in one fragment
if (agg.getAggMode() == AggMode.INPUT_TO_BUFFER || agg.getAggMode() == AggMode.INPUT_TO_RESULT) {
List<MultiDistinction> multiDistinctions = agg.getOutputExpressions().stream()
.filter(Alias.class::isInstance)
.map(a -> ((Alias) a).child())
.filter(AggregateExpression.class::isInstance)
.map(a -> ((AggregateExpression) a).getFunction())
.filter(MultiDistinction.class::isInstance)
.map(MultiDistinction.class::cast)
.collect(Collectors.toList());
if (multiDistinctions.size() == 1) {
Expression distinctChild = multiDistinctions.get(0).child(0);
DistributionSpec childDistribution = originChildrenProperties.get(0).getDistributionSpec();
if (distinctChild instanceof SlotReference && childDistribution instanceof DistributionSpecHash) {
SlotReference slotReference = (SlotReference) distinctChild;
DistributionSpecHash distributionSpecHash = (DistributionSpecHash) childDistribution;
List<ExprId> groupByColumns = agg.getGroupByExpressions().stream()
.map(SlotReference.class::cast)
.map(SlotReference::getExprId)
.collect(Collectors.toList());
DistributionSpecHash groupByRequire = new DistributionSpecHash(
groupByColumns, ShuffleType.REQUIRE);
List<ExprId> distinctChildColumns = Lists.newArrayList(slotReference.getExprId());
distinctChildColumns.add(slotReference.getExprId());
DistributionSpecHash distinctChildRequire = new DistributionSpecHash(
distinctChildColumns, ShuffleType.REQUIRE);
if ((!groupByColumns.isEmpty() && distributionSpecHash.satisfy(groupByRequire))
|| (groupByColumns.isEmpty() && distributionSpecHash.satisfy(distinctChildRequire))) {
if (!agg.mustUseMultiDistinctAgg()) {
return ImmutableList.of();
}
}
}
// if distinct without group by key, we prefer three or four stage distinct agg
// because the second phase of multi-distinct only have one instance, and it is slow generally.
if (agg.getOutputExpressions().size() == 1 && agg.getGroupByExpressions().isEmpty()
&& !agg.mustUseMultiDistinctAgg()) {
return ImmutableList.of();
}
}

private boolean skewOnShuffleExpr(PhysicalHashAggregate<? extends Plan> agg) {
// if statistic is unknown -> not skew
Statistics aggStatistics = agg.getGroupExpression().get().getOwnerGroup().getStatistics();
Statistics inputStatistics = agg.getGroupExpression().get().childStatistics(0);
if (aggStatistics == null || inputStatistics == null) {
return false;
}
if (AggregateUtils.hasUnknownStatistics(agg.getGroupByExpressions(), inputStatistics)) {
return false;
}
// There are two cases of skew:
double gbyNdv = aggStatistics.getRowCount();
// 1. ndv is very low
if (gbyNdv <= AggregateUtils.LOW_NDV_THRESHOLD) {
return true;
}
// 2. There is a hot value, and the ndv of other keys is very low
return isSkew(agg.getGroupByExpressions(), inputStatistics);
}

// if one group by key has hot value, and others ndv is low -> skew
private boolean isSkew(List<Expression> groupBy, Statistics inputStatistics) {
for (int i = 0; i < groupBy.size(); ++i) {
Expression expr = groupBy.get(i);
ColumnStatistic colStat = inputStatistics.findColumnStatistics(expr);
if (colStat == null) {
continue;
}
if (colStat.getHotValues() == null) {
continue;
}
List<Expression> otherExpr = excludeElement(groupBy, i);
double otherNdv = StatsCalculator.estimateGroupByRowCount(otherExpr, inputStatistics);
if (otherNdv <= AggregateUtils.LOW_NDV_THRESHOLD) {
return true;
}
}
// process must shuffle
return visit(agg, context);
return false;
}

private static <T> List<T> excludeElement(List<T> list, int index) {
List<T> newList = new ArrayList<>();
for (int i = 0; i < list.size(); i++) {
if (index != i) {
newList.add(list.get(i));
}
}
return newList;
}

private boolean onePhaseAggWithDistribute(PhysicalHashAggregate<? extends Plan> aggregate) {
return aggregate.getAggMode() == AggMode.INPUT_TO_RESULT
&& children.get(0).getPlan() instanceof PhysicalDistribute;
}

private boolean childIsCTEConsumer() {
List<GroupExpression> groupExpressions = children.get(0).children().get(0).getPhysicalExpressions();
if (groupExpressions != null && !groupExpressions.isEmpty()) {
return groupExpressions.get(0).getPlan() instanceof PhysicalCTEConsumer;
}
return false;
}

private boolean requireGather(PhysicalProperties requiredChildProperty) {
return requiredChildProperty.getDistributionSpec() instanceof DistributionSpecGather;
}

/* agg(group by x)-union all(A, B)
* no matter x.ndv is high or not, it is not worthwhile to shuffle A and B by x
* and hence we forbid one phase agg */
private boolean banAggUnionAll(PhysicalHashAggregate<? extends Plan> aggregate) {
return aggregate.getAggMode() == AggMode.INPUT_TO_RESULT
&& children.get(0).getPlan() instanceof PhysicalUnion
&& !((PhysicalUnion) children.get(0).getPlan()).isDistinct();
}

@Override
Expand Down
Loading
Loading