diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
index 3b9ba912383e2f1..a0d748c08c72d0e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
@@ -240,19 +240,11 @@ public void toMemo() {
     }
 
     public Analyzer newAnalyzer() {
-        return newAnalyzer(false);
-    }
-
-    public Analyzer newAnalyzer(boolean analyzeView) {
-        return new Analyzer(this, analyzeView);
-    }
-
-    public Analyzer newAnalyzer(boolean analyzeView, Optional<CustomTableResolver> customTableResolver) {
-        return new Analyzer(this, analyzeView, customTableResolver);
+        return newAnalyzer(Optional.empty());
     }
 
     public Analyzer newAnalyzer(Optional<CustomTableResolver> customTableResolver) {
-        return newAnalyzer(false, customTableResolver);
+        return new Analyzer(this, customTableResolver);
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/AbstractBatchJobExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/AbstractBatchJobExecutor.java
index bec86debc9ecac1..4eebf6ffc05f4b5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/AbstractBatchJobExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/AbstractBatchJobExecutor.java
@@ -29,6 +29,7 @@
 import org.apache.doris.nereids.rules.Rule;
 import org.apache.doris.nereids.rules.RuleFactory;
 import org.apache.doris.nereids.rules.RuleType;
+import org.apache.doris.nereids.trees.plans.Plan;
 import org.apache.doris.nereids.trees.plans.visitor.CustomRewriter;
 
 import com.google.common.collect.ImmutableList;
@@ -36,6 +37,8 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
+import java.util.Set;
+import java.util.function.Predicate;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -46,6 +49,8 @@
  * Each batch of rules will be uniformly executed.
  */
 public abstract class AbstractBatchJobExecutor {
+    private static final ThreadLocal<Set<Class<Plan>>> NOT_TRAVERSE_CHILDREN = new ThreadLocal<>();
+    private static final Predicate<Plan> TRAVERSE_ALL_PLANS = plan -> true;
 
     protected CascadesContext cascadesContext;
 
@@ -65,6 +70,17 @@ public static List<RewriteJob> jobs(RewriteJob... jobs) {
         ).collect(ImmutableList.toImmutableList());
     }
 
+    /** notTraverseChildrenOf */
+    public static <T> T notTraverseChildrenOf(
+            Set<Class<? extends Plan>> notTraverseClasses, Supplier<T> action) {
+        try {
+            NOT_TRAVERSE_CHILDREN.set((Set) notTraverseClasses);
+            return action.get();
+        } finally {
+            NOT_TRAVERSE_CHILDREN.remove();
+        }
+    }
+
     public static TopicRewriteJob topic(String topicName, RewriteJob... jobs) {
         return new TopicRewriteJob(topicName, Arrays.asList(jobs));
     }
@@ -82,7 +98,7 @@ public static RewriteJob bottomUp(List<RuleFactory> ruleFactories) {
                 .map(RuleFactory::buildRules)
                 .flatMap(List::stream)
                 .collect(ImmutableList.toImmutableList());
-        return new RootPlanTreeRewriteJob(rules, PlanTreeRewriteBottomUpJob::new, true);
+        return new RootPlanTreeRewriteJob(rules, PlanTreeRewriteBottomUpJob::new, getTraversePredicate(), true);
     }
 
     public static RewriteJob topDown(RuleFactory... ruleFactories) {
@@ -98,7 +114,7 @@ public static RewriteJob topDown(List<RuleFactory> ruleFactories, boolean once)
                 .map(RuleFactory::buildRules)
                 .flatMap(List::stream)
                 .collect(ImmutableList.toImmutableList());
-        return new RootPlanTreeRewriteJob(rules, PlanTreeRewriteTopDownJob::new, once);
+        return new RootPlanTreeRewriteJob(rules, PlanTreeRewriteTopDownJob::new, getTraversePredicate(), once);
     }
 
     public static RewriteJob custom(RuleType ruleType, Supplier<CustomRewriter> planRewriter) {
@@ -126,4 +142,24 @@ public void execute() {
     }
 
     public abstract List<RewriteJob> getJobs();
+
+    private static Predicate<Plan> getTraversePredicate() {
+        Set<Class<Plan>> notTraverseChildren = NOT_TRAVERSE_CHILDREN.get();
+        return notTraverseChildren == null
+                ? TRAVERSE_ALL_PLANS
+                : new NotTraverseChildren(notTraverseChildren);
+    }
+
+    private static class NotTraverseChildren implements Predicate<Plan> {
+        private final Set<Class<Plan>> notTraverseChildren;
+
+        public NotTraverseChildren(Set<Class<Plan>> notTraverseChildren) {
+            this.notTraverseChildren = Objects.requireNonNull(notTraverseChildren, "notTraversePlans can not be null");
+        }
+
+        @Override
+        public boolean test(Plan plan) {
+            return !notTraverseChildren.contains(plan.getClass());
+        }
+    }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java
index b72240cb8e503e0..4c840dd69d35517 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java
@@ -48,6 +48,10 @@
 import org.apache.doris.nereids.rules.rewrite.MergeProjects;
 import org.apache.doris.nereids.rules.rewrite.SemiJoinCommute;
 import org.apache.doris.nereids.rules.rewrite.SimplifyAggGroupBy;
+import org.apache.doris.nereids.trees.plans.logical.LogicalCTEAnchor;
+import org.apache.doris.nereids.trees.plans.logical.LogicalView;
+
+import com.google.common.collect.ImmutableSet;
 
 import java.util.List;
 import java.util.Objects;
@@ -59,8 +63,7 @@
  */
 public class Analyzer extends AbstractBatchJobExecutor {
 
-    public static final List<RewriteJob> DEFAULT_ANALYZE_JOBS = buildAnalyzeJobs(Optional.empty());
-    public static final List<RewriteJob> DEFAULT_ANALYZE_VIEW_JOBS = buildAnalyzeViewJobs(Optional.empty());
+    public static final List<RewriteJob> ANALYZE_JOBS = buildAnalyzeJobs(Optional.empty());
 
     private final List<RewriteJob> jobs;
 
@@ -69,36 +72,23 @@ public class Analyzer extends AbstractBatchJobExecutor {
      * @param cascadesContext planner context for execute job
      */
     public Analyzer(CascadesContext cascadesContext) {
-        this(cascadesContext, false);
-    }
-
-    public Analyzer(CascadesContext cascadesContext, boolean analyzeView) {
-        this(cascadesContext, analyzeView, Optional.empty());
+        this(cascadesContext, Optional.empty());
     }
 
     /**
     * constructor of Analyzer. For view, we only do bind relation since other analyze step will do by outer Analyzer.
     *
     * @param cascadesContext current context for analyzer
-    * @param analyzeView analyze view or user sql. If true, analyzer is used for view.
     * @param customTableResolver custom resolver for outer catalog.
     */
-    public Analyzer(CascadesContext cascadesContext, boolean analyzeView,
-            Optional<CustomTableResolver> customTableResolver) {
+    public Analyzer(CascadesContext cascadesContext, Optional<CustomTableResolver> customTableResolver) {
         super(cascadesContext);
         Objects.requireNonNull(customTableResolver, "customTableResolver cannot be null");
-        if (analyzeView) {
-            if (customTableResolver.isPresent()) {
-                this.jobs = buildAnalyzeViewJobs(customTableResolver);
-            } else {
-                this.jobs = DEFAULT_ANALYZE_VIEW_JOBS;
-            }
+
+        if (customTableResolver.isPresent()) {
+            this.jobs = buildAnalyzeJobs(customTableResolver);
         } else {
-            if (customTableResolver.isPresent()) {
-                this.jobs = buildAnalyzeJobs(customTableResolver);
-            } else {
-                this.jobs = DEFAULT_ANALYZE_JOBS;
-            }
+            this.jobs = ANALYZE_JOBS;
         }
     }
 
@@ -114,47 +104,43 @@ public void analyze() {
         execute();
     }
 
-    private static List<RewriteJob> buildAnalyzeViewJobs(Optional<CustomTableResolver> customTableResolver) {
-        return jobs(
-            topDown(new AnalyzeCTE()),
-            topDown(new EliminateLogicalSelectHint()),
-            bottomUp(
-                new BindRelation(customTableResolver),
-                new CheckPolicy()
-            )
+    private static List<RewriteJob> buildAnalyzeJobs(Optional<CustomTableResolver> customTableResolver) {
+        return notTraverseChildrenOf(
+                ImmutableSet.of(LogicalView.class, LogicalCTEAnchor.class),
+                () -> buildAnalyzerJobs(customTableResolver)
         );
     }
 
-    private static List<RewriteJob> buildAnalyzeJobs(Optional<CustomTableResolver> customTableResolver) {
+    private static List<RewriteJob> buildAnalyzerJobs(Optional<CustomTableResolver> customTableResolver) {
         return jobs(
             // we should eliminate hint before "Subquery unnesting".
             topDown(new AnalyzeCTE()),
             topDown(new EliminateLogicalSelectHint()),
             bottomUp(
-                new BindRelation(customTableResolver),
-                new CheckPolicy()
+                    new BindRelation(customTableResolver),
+                    new CheckPolicy()
             ),
             bottomUp(new BindExpression()),
             topDown(new BindSink()),
             bottomUp(new CheckAfterBind()),
             bottomUp(
-                new ProjectToGlobalAggregate(),
-                // this rule check's the logicalProject node's isDistinct property
-                // and replace the logicalProject node with a LogicalAggregate node
-                // so any rule before this, if create a new logicalProject node
-                // should make sure isDistinct property is correctly passed around.
-                // please see rule BindSlotReference or BindFunction for example
-                new EliminateDistinctConstant(),
-                new ProjectWithDistinctToAggregate(),
-                new ReplaceExpressionByChildOutput(),
-                new OneRowRelationExtractAggregate()
+                    new ProjectToGlobalAggregate(),
+                    // this rule check's the logicalProject node's isDistinct property
+                    // and replace the logicalProject node with a LogicalAggregate node
+                    // so any rule before this, if create a new logicalProject node
+                    // should make sure isDistinct property is correctly passed around.
+                    // please see rule BindSlotReference or BindFunction for example
+                    new EliminateDistinctConstant(),
+                    new ProjectWithDistinctToAggregate(),
+                    new ReplaceExpressionByChildOutput(),
+                    new OneRowRelationExtractAggregate()
             ),
             topDown(
-                new FillUpMissingSlots(),
-                // We should use NormalizeRepeat to compute nullable properties for LogicalRepeat in the analysis
-                // stage. NormalizeRepeat will compute nullable property, add virtual slot, LogicalAggregate and
-                // LogicalProject for normalize. This rule depends on FillUpMissingSlots to fill up slots.
-                new NormalizeRepeat()
+                    new FillUpMissingSlots(),
+                    // We should use NormalizeRepeat to compute nullable properties for LogicalRepeat in the analysis
+                    // stage. NormalizeRepeat will compute nullable property, add virtual slot, LogicalAggregate and
+                    // LogicalProject for normalize. This rule depends on FillUpMissingSlots to fill up slots.
+                    new NormalizeRepeat()
             ),
             bottomUp(new AdjustAggregateNullableForEmptySet()),
             // consider sql with user defined var @t_zone
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java
index 2c81a60e0ed697e..a13e784c7a88b0c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java
@@ -139,8 +139,10 @@
 import org.apache.doris.nereids.rules.rewrite.batch.EliminateUselessPlanUnderApply;
 import org.apache.doris.nereids.rules.rewrite.mv.SelectMaterializedIndexWithAggregate;
 import org.apache.doris.nereids.rules.rewrite.mv.SelectMaterializedIndexWithoutAggregate;
+import org.apache.doris.nereids.trees.plans.logical.LogicalCTEAnchor;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
 import java.util.List;
@@ -151,300 +153,308 @@
  */
 public class Rewriter extends AbstractBatchJobExecutor {
 
-    private static final List<RewriteJob> CTE_CHILDREN_REWRITE_JOBS_BEFORE_SUB_PATH_PUSH_DOWN = jobs(
-            topic("Plan Normalization",
-                    topDown(
-                            new EliminateOrderByConstant(),
-                            new EliminateSortUnderSubqueryOrView(),
-                            new EliminateGroupByConstant(),
-                            // MergeProjects depends on this rule
-                            new LogicalSubQueryAliasToLogicalProject(),
-                            // TODO: we should do expression normalization after plan normalization
-                            // because some rewritten depends on sub expression tree matching
-                            // such as group by key matching and replaced
-                            // but we need to do some normalization before subquery unnesting,
-                            // such as extract common expression.
-                            new ExpressionNormalizationAndOptimization(),
-                            new AvgDistinctToSumDivCount(),
-                            new CountDistinctRewrite(),
-                            new ExtractFilterFromCrossJoin()
-                    ),
-                    topDown(
-                            // ExtractSingleTableExpressionFromDisjunction conflict to InPredicateToEqualToRule
-                            // in the ExpressionNormalization, so must invoke in another job, otherwise dead loop.
-                            new ExtractSingleTableExpressionFromDisjunction()
-                    )
-            ),
-            // subquery unnesting relay on ExpressionNormalization to extract common factor expression
-            topic("Subquery unnesting",
-                    // after doing NormalizeAggregate in analysis job
-                    // we need run the following 2 rules to make AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION work
-                    bottomUp(new PullUpProjectUnderApply()),
-                    topDown(new PushDownFilterThroughProject()),
-                    custom(RuleType.AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION,
-                            AggScalarSubQueryToWindowFunction::new),
-                    bottomUp(
-                            new EliminateUselessPlanUnderApply(),
-                            // CorrelateApplyToUnCorrelateApply and ApplyToJoin
-                            // and SelectMaterializedIndexWithAggregate depends on this rule
-                            new MergeProjects(),
-                            /*
-                             * Subquery unnesting.
-                             * 1. Adjust the plan in correlated logicalApply
-                             *    so that there are no correlated columns in the subquery.
-                             * 2. Convert logicalApply to a logicalJoin.
-                             * TODO: group these rules to make sure the result plan is what we expected.
-                             */
-                            new CorrelateApplyToUnCorrelateApply(),
-                            new ApplyToJoin()
-                    )
-            ),
-            // before `Subquery unnesting` topic, some correlate slots should have appeared at LogicalApply.left,
-            // but it appeared at LogicalApply.right. After the `Subquery unnesting` topic, all slots is placed in a
-            // normal position, then we can check column privileges by these steps
-            //
-            // 1. use ColumnPruning rule to derive the used slots in LogicalView
-            // 2. and then check the column privileges
-            // 3. finally, we can eliminate the LogicalView
-            topic("Inline view and check column privileges",
-                    custom(RuleType.CHECK_PRIVILEGES, CheckPrivileges::new),
-                    bottomUp(new InlineLogicalView())
-            ),
-            topic("Eliminate optimization",
-                    bottomUp(
-                            new EliminateLimit(),
-                            new EliminateFilter(),
-                            new EliminateAggregate(),
-                            new EliminateAggCaseWhen(),
-                            new ReduceAggregateChildOutputRows(),
-                            new EliminateJoinCondition(),
-                            new EliminateAssertNumRows(),
-                            new EliminateSemiJoin()
-                    )
-            ),
-            // please note: this rule must run before NormalizeAggregate
-            topDown(new AdjustAggregateNullableForEmptySet()),
-            // The rule modification needs to be done after the subquery is unnested,
-            // because for scalarSubQuery, the connection condition is stored in apply in the analyzer phase,
-            // but when normalizeAggregate/normalizeSort is performed, the members in apply cannot be obtained,
-            // resulting in inconsistent output results and results in apply
-            topDown(
-                    new NormalizeAggregate(),
-                    new CountLiteralRewrite(),
-                    new NormalizeSort()
-            ),
-            topic("Window analysis",
-                    topDown(
-                            new ExtractAndNormalizeWindowExpression(),
-                            new CheckAndStandardizeWindowFunctionAndFrame(),
-                            new SimplifyWindowExpression()
-                    )
-            ),
-            topic("Rewrite join",
-                    // infer not null filter, then push down filter, and then reorder join(cross join to inner join)
-                    topDown(
-                            new InferAggNotNull(),
-                            new InferFilterNotNull(),
-                            new InferJoinNotNull()
-                    ),
-                    // ReorderJoin depends PUSH_DOWN_FILTERS
-                    // the PUSH_DOWN_FILTERS depends on lots of rules, e.g. merge project, eliminate outer,
-                    // sometimes transform the bottom plan make some rules usable which can apply to the top plan,
-                    // but top-down traverse can not cover this case in one iteration, so bottom-up is more
-                    // efficient because it can find the new plans and apply transform wherever it is
-                    bottomUp(RuleSet.PUSH_DOWN_FILTERS),
-                    // after push down, some new filters are generated, which needs to be optimized. (example: tpch q19)
-                    // topDown(new ExpressionOptimization()),
-                    topDown(
-                            new MergeFilters(),
-                            new ReorderJoin(),
-                            new PushFilterInsideJoin(),
-                            new FindHashConditionForJoin(),
-                            new ConvertInnerOrCrossJoin(),
-                            new EliminateNullAwareLeftAntiJoin()
-                    ),
-                    // push down SEMI Join
-                    bottomUp(
-                            new TransposeSemiJoinLogicalJoin(),
-                            new TransposeSemiJoinLogicalJoinProject(),
-                            new TransposeSemiJoinAgg(),
-                            new TransposeSemiJoinAggProject()
-                    ),
-                    topDown(
-                            new EliminateDedupJoinCondition()
-                    ),
-                    // eliminate useless not null or inferred not null
-                    // TODO: wait InferPredicates to infer more not null.
-                    bottomUp(new EliminateNotNull()),
-                    topDown(new ConvertInnerOrCrossJoin()),
-                    topDown(new ProjectOtherJoinConditionForNestedLoopJoin())
-            ),
-            topic("Column pruning and infer predicate",
-                    custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
-                    custom(RuleType.INFER_PREDICATES, InferPredicates::new),
-                    // column pruning create new project, so we should use PUSH_DOWN_FILTERS
-                    // to change filter-project to project-filter
-                    bottomUp(RuleSet.PUSH_DOWN_FILTERS),
-                    // after eliminate outer join in the PUSH_DOWN_FILTERS, we can infer more predicate and push down
-                    custom(RuleType.INFER_PREDICATES, InferPredicates::new),
-                    bottomUp(RuleSet.PUSH_DOWN_FILTERS),
-                    // after eliminate outer join, we can move some filters to join.otherJoinConjuncts,
-                    // this can help to translate plan to backend
-                    topDown(new PushFilterInsideJoin()),
-                    topDown(new FindHashConditionForJoin()),
-                    topDown(new ExpressionNormalization())
-            ),
-
-            // this rule should invoke after ColumnPruning
-            custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new),
-
-            topic("Set operation optimization",
-                    // Do MergeSetOperation first because we hope to match pattern of Distinct SetOperator.
-                    topDown(new PushProjectThroughUnion(), new MergeProjects()),
-                    bottomUp(new MergeSetOperations(), new MergeSetOperationsExcept()),
-                    bottomUp(new PushProjectIntoOneRowRelation()),
-                    topDown(new MergeOneRowRelationIntoUnion()),
-                    topDown(new PushProjectIntoUnion()),
-                    costBased(topDown(new InferSetOperatorDistinct())),
-                    topDown(new BuildAggForUnion())
-            ),
-
-            topic("Eliminate GroupBy",
-                    topDown(new EliminateGroupBy(),
-                            new MergeAggregate(),
-                            // need to adjust min/max/sum nullable attribute after merge aggregate
-                            new AdjustAggregateNullableForEmptySet())
-            ),
-
-            topic("Eager aggregation",
-                    topDown(
-                            new PushDownAggThroughJoinOneSide(),
-                            new PushDownAggThroughJoin()
-                    ),
-                    custom(RuleType.PUSH_DOWN_DISTINCT_THROUGH_JOIN, PushDownDistinctThroughJoin::new)
-            ),
-
-            // this rule should invoke after infer predicate and push down distinct, and before push down limit
-            topic("eliminate join according unique or foreign key",
-                    bottomUp(new EliminateJoinByFK()),
-                    topDown(new EliminateJoinByUnique())
-            ),
-
-            // this rule should be after topic "Column pruning and infer predicate"
-            topic("Join pull up",
-                    topDown(
-                            new EliminateFilter(),
-                            new PushDownFilterThroughProject(),
-                            new MergeProjects()
-                    ),
-                    topDown(
-                            new PullUpJoinFromUnionAll()
-                    ),
-                    custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
-                    bottomUp(RuleSet.PUSH_DOWN_FILTERS),
-                    custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new)
-            ),
-
-            // this rule should be invoked after topic "Join pull up"
-            topic("eliminate group by keys according to fd items",
-                    topDown(new EliminateGroupByKey())
-            ),
-
-            topic("Limit optimization",
-                    // TODO: the logical plan should not contains any phase information,
-                    //       we should refactor like AggregateStrategies, e.g. LimitStrategies,
-                    //       generate one PhysicalLimit if current distribution is gather or two
-                    //       PhysicalLimits with gather exchange
-                    topDown(new LimitSortToTopN()),
-                    topDown(new MergeTopNs()),
-                    topDown(new SplitLimit()),
-                    topDown(
-                            new PushDownLimit(),
-                            new PushDownLimitDistinctThroughJoin(),
-                            new PushDownLimitDistinctThroughUnion(),
-                            new PushDownTopNDistinctThroughJoin(),
-                            new PushDownTopNDistinctThroughUnion(),
-                            new PushDownTopNThroughJoin(),
-                            new PushDownTopNThroughWindow(),
-                            new PushDownTopNThroughUnion()
-                    ),
-                    topDown(new CreatePartitionTopNFromWindow()),
-                    topDown(
-                            new PullUpProjectUnderTopN(),
-                            new PullUpProjectUnderLimit()
-                    )
-            ),
-            // TODO: these rules should be implementation rules, and generate alternative physical plans.
-            topic("Table/Physical optimization",
-                    topDown(
-                            new PruneOlapScanPartition(),
-                            new PruneEmptyPartition(),
-                            new PruneFileScanPartition(),
-                            new PushConjunctsIntoJdbcScan(),
-                            new PushConjunctsIntoOdbcScan(),
-                            new PushConjunctsIntoEsScan()
-                    )
-            ),
-            topic("MV optimization",
-                    topDown(
-                            new SelectMaterializedIndexWithAggregate(),
-                            new SelectMaterializedIndexWithoutAggregate(),
-                            new EliminateFilter(),
-                            new PushDownFilterThroughProject(),
-                            new MergeProjects(),
-                            new PruneOlapScanTablet()
-                    ),
-                    custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
-                    bottomUp(RuleSet.PUSH_DOWN_FILTERS),
-                    custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new)
-            ),
-            topic("adjust preagg status",
-                    topDown(new AdjustPreAggStatus())
-            ),
-            topic("Point query short circuit",
-                    topDown(new LogicalResultSinkToShortCircuitPointQuery())),
-            topic("eliminate",
-                    // SORT_PRUNING should be applied after mergeLimit
-                    custom(RuleType.ELIMINATE_SORT, EliminateSort::new),
-                    bottomUp(new EliminateEmptyRelation())
-            ),
-            topic("agg rewrite",
-                    // these rules should be put after mv optimization to avoid mv matching fail
-                    topDown(new SumLiteralRewrite(),
-                            new MergePercentileToArray())
-            ),
-            topic("Push project and filter on cte consumer to cte producer",
-                    topDown(
-                            new CollectFilterAboveConsumer(),
-                            new CollectCteConsumerOutput()
-                    )
-            )
-    );
+    private static final List<RewriteJob> CTE_CHILDREN_REWRITE_JOBS_BEFORE_SUB_PATH_PUSH_DOWN = notTraverseChildrenOf(
+            ImmutableSet.of(LogicalCTEAnchor.class),
+            () -> jobs(
+                    topic("Plan Normalization",
+                            topDown(
+                                    new EliminateOrderByConstant(),
+                                    new EliminateSortUnderSubqueryOrView(),
+                                    new EliminateGroupByConstant(),
+                                    // MergeProjects depends on this rule
+                                    new LogicalSubQueryAliasToLogicalProject(),
+                                    // TODO: we should do expression normalization after plan normalization
+                                    // because some rewritten depends on sub expression tree matching
+                                    // such as group by key matching and replaced
+                                    // but we need to do some normalization before subquery unnesting,
+                                    // such as extract common expression.
+                                    new ExpressionNormalizationAndOptimization(),
+                                    new AvgDistinctToSumDivCount(),
+                                    new CountDistinctRewrite(),
+                                    new ExtractFilterFromCrossJoin()
+                            ),
+                            topDown(
+                                    // ExtractSingleTableExpressionFromDisjunction conflict to InPredicateToEqualToRule
+                                    // in the ExpressionNormalization, so must invoke in another job, otherwise dead loop.
+                                    new ExtractSingleTableExpressionFromDisjunction()
+                            )
+                    ),
+                    // subquery unnesting relay on ExpressionNormalization to extract common factor expression
+                    topic("Subquery unnesting",
+                            // after doing NormalizeAggregate in analysis job
+                            // we need run the following 2 rules to make AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION work
+                            bottomUp(new PullUpProjectUnderApply()),
+                            topDown(new PushDownFilterThroughProject()),
+                            custom(RuleType.AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION,
+                                    AggScalarSubQueryToWindowFunction::new),
+                            bottomUp(
+                                    new EliminateUselessPlanUnderApply(),
+                                    // CorrelateApplyToUnCorrelateApply and ApplyToJoin
+                                    // and SelectMaterializedIndexWithAggregate depends on this rule
+                                    new MergeProjects(),
+                                    /*
+                                     * Subquery unnesting.
+                                     * 1. Adjust the plan in correlated logicalApply
+                                     *    so that there are no correlated columns in the subquery.
+                                     * 2. Convert logicalApply to a logicalJoin.
+                                     * TODO: group these rules to make sure the result plan is what we expected.
+                                     */
+                                    new CorrelateApplyToUnCorrelateApply(),
+                                    new ApplyToJoin()
+                            )
+                    ),
+                    // before `Subquery unnesting` topic, some correlate slots should have appeared at LogicalApply.left,
+                    // but it appeared at LogicalApply.right. After the `Subquery unnesting` topic, all slots is placed in a
+                    // normal position, then we can check column privileges by these steps
+                    //
+                    // 1. use ColumnPruning rule to derive the used slots in LogicalView
+                    // 2. and then check the column privileges
+                    // 3. finally, we can eliminate the LogicalView
+                    topic("Inline view and check column privileges",
+                            custom(RuleType.CHECK_PRIVILEGES, CheckPrivileges::new),
+                            bottomUp(new InlineLogicalView())
+                    ),
+                    topic("Eliminate optimization",
+                            bottomUp(
+                                    new EliminateLimit(),
+                                    new EliminateFilter(),
+                                    new EliminateAggregate(),
+                                    new EliminateAggCaseWhen(),
+                                    new ReduceAggregateChildOutputRows(),
+                                    new EliminateJoinCondition(),
+                                    new EliminateAssertNumRows(),
+                                    new EliminateSemiJoin()
+                            )
+                    ),
+                    // please note: this rule must run before NormalizeAggregate
+                    topDown(new AdjustAggregateNullableForEmptySet()),
+                    // The rule modification needs to be done after the subquery is unnested,
+                    // because for scalarSubQuery, the connection condition is stored in apply in the analyzer phase,
+                    // but when normalizeAggregate/normalizeSort is performed, the members in apply cannot be obtained,
+                    // resulting in inconsistent output results and results in apply
+                    topDown(
+                            new NormalizeAggregate(),
+                            new CountLiteralRewrite(),
+                            new NormalizeSort()
+                    ),
+                    topic("Window analysis",
+                            topDown(
+                                    new ExtractAndNormalizeWindowExpression(),
+                                    new CheckAndStandardizeWindowFunctionAndFrame(),
+                                    new SimplifyWindowExpression()
+                            )
+                    ),
+                    topic("Rewrite join",
+                            // infer not null filter, then push down filter, and then reorder join(cross join to inner join)
+                            topDown(
+                                    new InferAggNotNull(),
+                                    new InferFilterNotNull(),
+                                    new InferJoinNotNull()
+                            ),
+                            // ReorderJoin depends PUSH_DOWN_FILTERS
+                            // the PUSH_DOWN_FILTERS depends on lots of rules, e.g. merge project, eliminate outer,
+                            // sometimes transform the bottom plan make some rules usable which can apply to the top plan,
+                            // but top-down traverse can not cover this case in one iteration, so bottom-up is more
+                            // efficient because it can find the new plans and apply transform wherever it is
+                            bottomUp(RuleSet.PUSH_DOWN_FILTERS),
+                            // after push down, some new filters are generated, which needs to be optimized.
+                            // (example: tpch q19)
+                            // topDown(new ExpressionOptimization()),
+                            topDown(
+                                    new MergeFilters(),
+                                    new ReorderJoin(),
+                                    new PushFilterInsideJoin(),
+                                    new FindHashConditionForJoin(),
+                                    new ConvertInnerOrCrossJoin(),
+                                    new EliminateNullAwareLeftAntiJoin()
+                            ),
+                            // push down SEMI Join
+                            bottomUp(
+                                    new TransposeSemiJoinLogicalJoin(),
+                                    new TransposeSemiJoinLogicalJoinProject(),
+                                    new TransposeSemiJoinAgg(),
+                                    new TransposeSemiJoinAggProject()
+                            ),
+                            topDown(
+                                    new EliminateDedupJoinCondition()
+                            ),
+                            // eliminate useless not null or inferred not null
+                            // TODO: wait InferPredicates to infer more not null.
+                            bottomUp(new EliminateNotNull()),
+                            topDown(new ConvertInnerOrCrossJoin()),
+                            topDown(new ProjectOtherJoinConditionForNestedLoopJoin())
+                    ),
+                    topic("Column pruning and infer predicate",
+                            custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
+                            custom(RuleType.INFER_PREDICATES, InferPredicates::new),
+                            // column pruning create new project, so we should use PUSH_DOWN_FILTERS
+                            // to change filter-project to project-filter
+                            bottomUp(RuleSet.PUSH_DOWN_FILTERS),
+                            // after eliminate outer join in the PUSH_DOWN_FILTERS,
+                            // we can infer more predicate and push down
+                            custom(RuleType.INFER_PREDICATES, InferPredicates::new),
+                            bottomUp(RuleSet.PUSH_DOWN_FILTERS),
+                            // after eliminate outer join, we can move some filters to join.otherJoinConjuncts,
+                            // this can help to translate plan to backend
+                            topDown(new PushFilterInsideJoin()),
+                            topDown(new FindHashConditionForJoin()),
+                            topDown(new ExpressionNormalization())
+                    ),
+
+                    // this rule should invoke after ColumnPruning
+                    custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new),
+
+                    topic("Set operation optimization",
+                            // Do MergeSetOperation first because we hope to match pattern of Distinct SetOperator.
+                            topDown(new PushProjectThroughUnion(), new MergeProjects()),
+                            bottomUp(new MergeSetOperations(), new MergeSetOperationsExcept()),
+                            bottomUp(new PushProjectIntoOneRowRelation()),
+                            topDown(new MergeOneRowRelationIntoUnion()),
+                            topDown(new PushProjectIntoUnion()),
+                            costBased(topDown(new InferSetOperatorDistinct())),
+                            topDown(new BuildAggForUnion())
+                    ),
+
+                    topic("Eliminate GroupBy",
+                            topDown(new EliminateGroupBy(),
+                                    new MergeAggregate(),
+                                    // need to adjust min/max/sum nullable attribute after merge aggregate
+                                    new AdjustAggregateNullableForEmptySet())
+                    ),
+
+                    topic("Eager aggregation",
+                            topDown(
+                                    new PushDownAggThroughJoinOneSide(),
+                                    new PushDownAggThroughJoin()
+                            ),
+                            custom(RuleType.PUSH_DOWN_DISTINCT_THROUGH_JOIN, PushDownDistinctThroughJoin::new)
+                    ),
+
+                    // this rule should invoke after infer predicate and push down distinct, and before push down limit
+                    topic("eliminate join according unique or foreign key",
+                            bottomUp(new EliminateJoinByFK()),
+                            topDown(new EliminateJoinByUnique())
+                    ),
+
+                    // this rule should be after topic "Column pruning and infer predicate"
+                    topic("Join pull up",
+                            topDown(
+                                    new EliminateFilter(),
+                                    new PushDownFilterThroughProject(),
+                                    new MergeProjects()
+                            ),
+                            topDown(
+                                    new PullUpJoinFromUnionAll()
+                            ),
+                            custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
+                            bottomUp(RuleSet.PUSH_DOWN_FILTERS),
+                            custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new)
+                    ),
+
+                    // this rule should be invoked after topic "Join pull up"
+                    topic("eliminate group by keys according to fd items",
+                            topDown(new EliminateGroupByKey())
+                    ),
+
+                    topic("Limit optimization",
+                            // TODO: the logical plan should not contains any phase information,
+                            //       we should refactor like AggregateStrategies, e.g. LimitStrategies,
+                            //       generate one PhysicalLimit if current distribution is gather or two
+                            //       PhysicalLimits with gather exchange
+                            topDown(new LimitSortToTopN()),
+                            topDown(new MergeTopNs()),
+                            topDown(new SplitLimit()),
+                            topDown(
+                                    new PushDownLimit(),
+                                    new PushDownLimitDistinctThroughJoin(),
+                                    new PushDownLimitDistinctThroughUnion(),
+                                    new PushDownTopNDistinctThroughJoin(),
+                                    new PushDownTopNDistinctThroughUnion(),
+                                    new PushDownTopNThroughJoin(),
+                                    new PushDownTopNThroughWindow(),
+                                    new PushDownTopNThroughUnion()
+                            ),
+                            topDown(new CreatePartitionTopNFromWindow()),
+                            topDown(
+                                    new PullUpProjectUnderTopN(),
+                                    new PullUpProjectUnderLimit()
+                            )
+                    ),
+                    // TODO: these rules should be implementation rules, and generate alternative physical plans.
+                    topic("Table/Physical optimization",
+                            topDown(
+                                    new PruneOlapScanPartition(),
+                                    new PruneEmptyPartition(),
+                                    new PruneFileScanPartition(),
+                                    new PushConjunctsIntoJdbcScan(),
+                                    new PushConjunctsIntoOdbcScan(),
+                                    new PushConjunctsIntoEsScan()
+                            )
+                    ),
+                    topic("MV optimization",
+                            topDown(
+                                    new SelectMaterializedIndexWithAggregate(),
+                                    new SelectMaterializedIndexWithoutAggregate(),
+                                    new EliminateFilter(),
+                                    new PushDownFilterThroughProject(),
+                                    new MergeProjects(),
+                                    new PruneOlapScanTablet()
+                            ),
+                            custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
+                            bottomUp(RuleSet.PUSH_DOWN_FILTERS),
+                            custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new)
+                    ),
+                    topic("adjust preagg status",
+                            topDown(new AdjustPreAggStatus())
+                    ),
+                    topic("Point query short circuit",
+                            topDown(new LogicalResultSinkToShortCircuitPointQuery())),
+                    topic("eliminate",
+                            // SORT_PRUNING should be applied after mergeLimit
+                            custom(RuleType.ELIMINATE_SORT, EliminateSort::new),
+                            bottomUp(new EliminateEmptyRelation())
+                    ),
+                    topic("agg rewrite",
+                            // these rules should be put after mv optimization to avoid mv matching fail
+                            topDown(new SumLiteralRewrite(),
+                                    new MergePercentileToArray())
+                    ),
+                    topic("Push project and filter on cte consumer to cte producer",
+                            topDown(
+                                    new CollectFilterAboveConsumer(),
+                                    new CollectCteConsumerOutput()
+                            )
+                    )
+            )
+    );
 
-    private static final List<RewriteJob> CTE_CHILDREN_REWRITE_JOBS_AFTER_SUB_PATH_PUSH_DOWN = jobs(
-            // after variant sub path pruning, we need do column pruning again
-            custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
-            bottomUp(ImmutableList.of(
-                    new PushDownFilterThroughProject(),
-                    new MergeProjects()
-            )),
-            custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new),
-            topic("topn optimize",
-                    topDown(new DeferMaterializeTopNResult())
-            ),
-            // this rule batch must keep at the end of rewrite to do some plan check
-            topic("Final rewrite and check",
-                    custom(RuleType.CHECK_DATA_TYPES, CheckDataTypes::new),
-                    topDown(new PushDownFilterThroughProject(), new MergeProjects()),
-                    custom(RuleType.ADJUST_CONJUNCTS_RETURN_TYPE, AdjustConjunctsReturnType::new),
-                    bottomUp(
-                            new ExpressionRewrite(CheckLegalityAfterRewrite.INSTANCE),
-                            new CheckMatchExpression(),
-                            new CheckMultiDistinct(),
-                            new CheckAfterRewrite()
-                    )
-            ),
-            topDown(new CollectCteConsumerOutput())
-    );
+    private static final List<RewriteJob> CTE_CHILDREN_REWRITE_JOBS_AFTER_SUB_PATH_PUSH_DOWN = notTraverseChildrenOf(
+            ImmutableSet.of(LogicalCTEAnchor.class),
+            () -> jobs(
+                    // after variant sub path pruning, we need do column pruning again
+                    custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
+                    bottomUp(ImmutableList.of(
+                            new PushDownFilterThroughProject(),
+                            new MergeProjects()
+                    )),
+                    custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new),
+                    topic("topn optimize",
+                            topDown(new DeferMaterializeTopNResult())
+                    ),
+                    // this rule batch must keep at the end of rewrite to do some plan check
+                    topic("Final rewrite and check",
+                            custom(RuleType.CHECK_DATA_TYPES, CheckDataTypes::new),
+                            topDown(new PushDownFilterThroughProject(), new MergeProjects()),
+                            custom(RuleType.ADJUST_CONJUNCTS_RETURN_TYPE, AdjustConjunctsReturnType::new),
+                            bottomUp(
+                                    new ExpressionRewrite(CheckLegalityAfterRewrite.INSTANCE),
+                                    new CheckMatchExpression(),
+                                    new CheckMultiDistinct(),
+                                    new CheckAfterRewrite()
+                            )
+                    ),
+                    topDown(new CollectCteConsumerOutput())
+            )
+    );
 
     private static final List<RewriteJob> WHOLE_TREE_REWRITE_JOBS
@@ -494,39 +504,45 @@ private static List<RewriteJob> getWholeTreeRewriteJobs(
             List<RewriteJob> beforePushDownJobs,
             List<RewriteJob> afterPushDownJobs) {
-        List<RewriteJob> rewriteJobs = Lists.newArrayListWithExpectedSize(300);
-        rewriteJobs.addAll(jobs(
-                topic("cte inline and pull up all cte anchor",
-                        custom(RuleType.PULL_UP_CTE_ANCHOR, PullUpCteAnchor::new),
-                        custom(RuleType.CTE_INLINE, CTEInline::new)
-                ),
-                topic("process limit session variables",
-                        custom(RuleType.ADD_DEFAULT_LIMIT, AddDefaultLimit::new)
-                ),
-                topic("rewrite cte sub-tree before sub path push down",
-                        custom(RuleType.REWRITE_CTE_CHILDREN, () -> new RewriteCteChildren(beforePushDownJobs))
-                )));
-        if (needOrExpansion) {
-            rewriteJobs.addAll(jobs(topic("or expansion",
-                    custom(RuleType.OR_EXPANSION, () -> OrExpansion.INSTANCE))));
-        }
-        if (needSubPathPushDown) {
-            rewriteJobs.addAll(jobs(
-                    topic("variant element_at push down",
-                            custom(RuleType.VARIANT_SUB_PATH_PRUNING, VariantSubPathPruning::new)
-                    )
-            ));
-        }
-        rewriteJobs.addAll(jobs(
-                topic("rewrite cte sub-tree after sub path push down",
-                        custom(RuleType.CLEAR_CONTEXT_STATUS, ClearContextStatus::new),
-                        custom(RuleType.REWRITE_CTE_CHILDREN, () -> new RewriteCteChildren(afterPushDownJobs))
-                ),
-                topic("whole plan check",
-                        custom(RuleType.ADJUST_NULLABLE, AdjustNullable::new)
-                )
-        ));
-        return rewriteJobs;
+        return notTraverseChildrenOf(
+                ImmutableSet.of(LogicalCTEAnchor.class),
+                () -> {
+                    List<RewriteJob> rewriteJobs = Lists.newArrayListWithExpectedSize(300);
+
+                    rewriteJobs.addAll(jobs(
+                            topic("cte inline and pull up all cte anchor",
+                                    custom(RuleType.PULL_UP_CTE_ANCHOR, PullUpCteAnchor::new),
+                                    custom(RuleType.CTE_INLINE, CTEInline::new)
+                            ),
+                            topic("process limit session variables",
+                                    custom(RuleType.ADD_DEFAULT_LIMIT, AddDefaultLimit::new)
+                            ),
+                            topic("rewrite cte sub-tree before sub path push down",
+                                    custom(RuleType.REWRITE_CTE_CHILDREN, () -> new RewriteCteChildren(beforePushDownJobs))
+                            )));
+                    if (needOrExpansion) {
+                        rewriteJobs.addAll(jobs(topic("or expansion",
+                                custom(RuleType.OR_EXPANSION, () -> OrExpansion.INSTANCE))));
+                    }
+                    if (needSubPathPushDown) {
+                        rewriteJobs.addAll(jobs(
+                                topic("variant element_at push down",
+                                        custom(RuleType.VARIANT_SUB_PATH_PRUNING, VariantSubPathPruning::new)
+                                )
+                        ));
+                    }
+                    rewriteJobs.addAll(jobs(
+                            topic("rewrite cte sub-tree after sub path push down",
+                                    custom(RuleType.CLEAR_CONTEXT_STATUS, ClearContextStatus::new),
+                                    custom(RuleType.REWRITE_CTE_CHILDREN, () -> new RewriteCteChildren(afterPushDownJobs))
+                            ),
+                            topic("whole plan check",
+                                    custom(RuleType.ADJUST_NULLABLE, AdjustNullable::new)
+                            )
+                    ));
+                    return rewriteJobs;
+                }
+        );
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteBottomUpJob.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteBottomUpJob.java
index 60555a9cc04ad69..aecb03a41585216 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteBottomUpJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteBottomUpJob.java
@@ -21,11 +21,11 @@
 import org.apache.doris.nereids.jobs.JobType;
 import org.apache.doris.nereids.rules.Rule;
 import org.apache.doris.nereids.trees.plans.Plan;
-import org.apache.doris.nereids.trees.plans.logical.LogicalCTEAnchor;
 
 import java.util.List;
 import java.util.Objects;
 import java.util.Optional;
+import java.util.function.Predicate;
 
 /**
  * PlanTreeRewriteBottomUpJob
@@ -55,8 +55,10 @@ enum RewriteState {
         ENSURE_CHILDREN_REWRITTEN
     }
 
-    public PlanTreeRewriteBottomUpJob(RewriteJobContext rewriteJobContext, JobContext context, List<Rule> rules) {
-        super(JobType.BOTTOM_UP_REWRITE, context);
+    public PlanTreeRewriteBottomUpJob(
+            RewriteJobContext rewriteJobContext, JobContext context,
+            Predicate<Plan> isTraverseChildren, List<Rule> rules) {
+        super(JobType.BOTTOM_UP_REWRITE, context, isTraverseChildren);
         this.rewriteJobContext = Objects.requireNonNull(rewriteJobContext, "rewriteContext cannot be null");
         this.rules = Objects.requireNonNull(rules, "rules cannot be null");
         this.batchId = rewriteJobContext.batchId;
@@ -97,7 +99,7 @@ private void rewriteThis() {
                 return;
             }
             // After the rewrite take effect, we should handle the children part again.
-            pushJob(new PlanTreeRewriteBottomUpJob(newJobContext, context, rules));
+            pushJob(new PlanTreeRewriteBottomUpJob(newJobContext, context, isTraverseChildren, rules));
             setState(rewriteResult.plan, RewriteState.ENSURE_CHILDREN_REWRITTEN, batchId);
         } else {
             // No new plan is generated, so just set the state of the current plan to 'REWRITTEN'.
@@ -110,12 +112,12 @@ private void ensureChildrenRewritten() {
         Plan plan = rewriteJobContext.plan;
         int batchId = rewriteJobContext.batchId;
         setState(plan, RewriteState.REWRITE_THIS, batchId);
-        pushJob(new PlanTreeRewriteBottomUpJob(rewriteJobContext, context, rules));
+        pushJob(new PlanTreeRewriteBottomUpJob(rewriteJobContext, context, isTraverseChildren, rules));
 
         // some rule return new plan tree, which the number of new plan node > 1,
         // we should transform this new plan nodes too.
         // NOTICE: this relay on pull up cte anchor
-        if (!(rewriteJobContext.plan instanceof LogicalCTEAnchor)) {
+        if (isTraverseChildren.test(plan)) {
             pushChildrenJobs(plan);
         }
     }
@@ -128,25 +130,25 @@ private void pushChildrenJobs(Plan plan) {
             case 1:
                 Plan child = children.get(0);
                 RewriteJobContext childRewriteJobContext = new RewriteJobContext(
                         child, rewriteJobContext, 0, false, batchId);
-                pushJob(new PlanTreeRewriteBottomUpJob(childRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteBottomUpJob(childRewriteJobContext, context, isTraverseChildren, rules));
                 return;
             case 2:
                 Plan right = children.get(1);
                 RewriteJobContext rightRewriteJobContext = new RewriteJobContext(
                         right, rewriteJobContext, 1, false, batchId);
-                pushJob(new PlanTreeRewriteBottomUpJob(rightRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteBottomUpJob(rightRewriteJobContext, context, isTraverseChildren, rules));
                 Plan left = children.get(0);
                 RewriteJobContext leftRewriteJobContext = new RewriteJobContext(
                         left, rewriteJobContext, 0, false, batchId);
-                pushJob(new PlanTreeRewriteBottomUpJob(leftRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteBottomUpJob(leftRewriteJobContext, context, isTraverseChildren, rules));
                 return;
             default:
                 for (int i = children.size() - 1; i >= 0; i--) {
                     child = children.get(i);
                     childRewriteJobContext = new RewriteJobContext(
                             child, rewriteJobContext, i, false, batchId);
-                    pushJob(new PlanTreeRewriteBottomUpJob(childRewriteJobContext, context, rules));
+                    pushJob(new PlanTreeRewriteBottomUpJob(childRewriteJobContext, context, isTraverseChildren, rules));
                 }
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteJob.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteJob.java
index c2b136c40fad785..0f87a745b5e43f1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteJob.java
@@ -31,12 +31,16 @@
 import com.google.common.collect.ImmutableList;
 
 import java.util.List;
+import java.util.Objects;
+import java.util.function.Predicate;
 
 /** PlanTreeRewriteJob */
 public abstract class PlanTreeRewriteJob extends Job {
+    protected final Predicate<Plan> isTraverseChildren;
 
-    public PlanTreeRewriteJob(JobType type, JobContext context) {
+    public PlanTreeRewriteJob(JobType type, JobContext context, Predicate<Plan> isTraverseChildren) {
         super(type, context);
+        this.isTraverseChildren = Objects.requireNonNull(isTraverseChildren, "isTraverseChildren can not be null");
     }
 
     protected final RewriteResult rewrite(Plan plan, List<Rule> rules, RewriteJobContext rewriteJobContext) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteTopDownJob.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteTopDownJob.java
index 14019bc885e0d05..19d92e2f4f361ac 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteTopDownJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/PlanTreeRewriteTopDownJob.java
@@ -21,10 +21,10 @@
 import org.apache.doris.nereids.jobs.JobType;
 import org.apache.doris.nereids.rules.Rule;
 import org.apache.doris.nereids.trees.plans.Plan;
-import org.apache.doris.nereids.trees.plans.logical.LogicalCTEAnchor;
 
 import java.util.List;
 import java.util.Objects;
+import java.util.function.Predicate;
 
 /**
  * PlanTreeRewriteTopDownJob
@@ -36,8 +36,10 @@ public class PlanTreeRewriteTopDownJob extends PlanTreeRewriteJob {
     private final RewriteJobContext rewriteJobContext;
     private final List<Rule> rules;
 
-    public PlanTreeRewriteTopDownJob(RewriteJobContext rewriteJobContext, JobContext context, List<Rule> rules) {
-        super(JobType.TOP_DOWN_REWRITE, context);
+    public PlanTreeRewriteTopDownJob(
+            RewriteJobContext rewriteJobContext, JobContext context,
+            Predicate<Plan> isTraverseChildren, List<Rule> rules) {
+        super(JobType.TOP_DOWN_REWRITE, context, isTraverseChildren);
         this.rewriteJobContext = Objects.requireNonNull(rewriteJobContext, "rewriteContext cannot be null");
         this.rules = Objects.requireNonNull(rules, "rules cannot be null");
     }
@@ -49,15 +51,15 @@ public void execute() {
             if (rewriteResult.hasNewPlan) {
                 RewriteJobContext newContext = rewriteJobContext
                         .withPlanAndChildrenVisited(rewriteResult.plan, false);
-                pushJob(new PlanTreeRewriteTopDownJob(newContext, context, rules));
+                pushJob(new PlanTreeRewriteTopDownJob(newContext, context, isTraverseChildren, rules));
                 return;
             }
 
             RewriteJobContext newRewriteJobContext = rewriteJobContext.withChildrenVisited(true);
-            pushJob(new PlanTreeRewriteTopDownJob(newRewriteJobContext, context, rules));
+            pushJob(new PlanTreeRewriteTopDownJob(newRewriteJobContext, context, isTraverseChildren, rules));
 
             // NOTICE: this relay on pull up cte anchor
-            if (!(this.rewriteJobContext.plan instanceof LogicalCTEAnchor)) {
+            if (isTraverseChildren.test(rewriteJobContext.plan)) {
                 pushChildrenJobs(newRewriteJobContext);
             }
         } else {
@@ -77,22 +79,22 @@ private void pushChildrenJobs(RewriteJobContext rewriteJobContext) {
             case 1:
                 RewriteJobContext childRewriteJobContext = new RewriteJobContext(
                         children.get(0), rewriteJobContext, 0, false, this.rewriteJobContext.batchId);
-                pushJob(new PlanTreeRewriteTopDownJob(childRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteTopDownJob(childRewriteJobContext, context, isTraverseChildren, rules));
                 return;
             case 2:
                 RewriteJobContext rightRewriteJobContext = new RewriteJobContext(
                         children.get(1), rewriteJobContext, 1, false, this.rewriteJobContext.batchId);
-                pushJob(new PlanTreeRewriteTopDownJob(rightRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteTopDownJob(rightRewriteJobContext, context, isTraverseChildren, rules));
                 RewriteJobContext leftRewriteJobContext = new RewriteJobContext(
                         children.get(0), rewriteJobContext, 0, false, this.rewriteJobContext.batchId);
-                pushJob(new PlanTreeRewriteTopDownJob(leftRewriteJobContext, context, rules));
+                pushJob(new PlanTreeRewriteTopDownJob(leftRewriteJobContext, context, isTraverseChildren, rules));
                 return;
             default:
                 for (int i = children.size() - 1; i >= 0; i--) {
                     childRewriteJobContext = new RewriteJobContext(
                             children.get(i), rewriteJobContext, i, false, this.rewriteJobContext.batchId);
-                    pushJob(new PlanTreeRewriteTopDownJob(childRewriteJobContext, context, rules));
+                    pushJob(new PlanTreeRewriteTopDownJob(childRewriteJobContext, context, isTraverseChildren, rules));
                 }
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/RootPlanTreeRewriteJob.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/RootPlanTreeRewriteJob.java
index d352dfee4a0b206..859e942b380b7f0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/RootPlanTreeRewriteJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/rewrite/RootPlanTreeRewriteJob.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Predicate;
 
 /** RootPlanTreeRewriteJob */
 public class RootPlanTreeRewriteJob implements RewriteJob {
@@ -36,11 +37,18 @@ public class RootPlanTreeRewriteJob implements RewriteJob {
     private final List<Rule> rules;
     private final RewriteJobBuilder rewriteJobBuilder;
     private final boolean once;
+    private final Predicate<Plan> isTraverseChildren;
 
     public RootPlanTreeRewriteJob(List<Rule> rules, RewriteJobBuilder rewriteJobBuilder, boolean once) {
+        this(rules, rewriteJobBuilder, plan -> true, once);
+    }
+
+    public RootPlanTreeRewriteJob(
+            List<Rule> rules, RewriteJobBuilder rewriteJobBuilder, Predicate<Plan> isTraverseChildren, boolean once) {
         this.rules = Objects.requireNonNull(rules, "rules cannot be null");
         this.rewriteJobBuilder = Objects.requireNonNull(rewriteJobBuilder, "rewriteJobBuilder cannot be null");
         this.once = once;
+        this.isTraverseChildren = isTraverseChildren;
     }
 
     @Override
@@ -52,7 +60,7 @@ public void execute(JobContext context) {
         int batchId = BATCH_ID.incrementAndGet();
         RootRewriteJobContext rewriteJobContext = new RootRewriteJobContext(
                 root, false, context, batchId);
-        Job rewriteJob = rewriteJobBuilder.build(rewriteJobContext, context, rules);
+        Job rewriteJob = rewriteJobBuilder.build(rewriteJobContext, context, isTraverseChildren, rules);
 
         context.getScheduleContext().pushJob(rewriteJob);
         cascadesContext.getJobScheduler().executeJobPool(cascadesContext);
@@ -67,7 +75,8 @@ public boolean isOnce() {
 
     /** RewriteJobBuilder */
     public interface RewriteJobBuilder {
-        Job build(RewriteJobContext rewriteJobContext, JobContext jobContext, List<Rule> rules);
+        Job build(RewriteJobContext rewriteJobContext, JobContext jobContext,
+                Predicate<Plan> isTraverseChildren, List<Rule> rules);
     }
 
     /** RootRewriteJobContext */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
index e8fd81565427753..bc4ac18b4845c61 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
@@ -350,7 +350,7 @@ private Plan parseAndAnalyzeView(TableIf view, String ddlSql, CascadesContext pa
         CascadesContext viewContext = CascadesContext.initContext(
                 parentContext.getStatementContext(), parsedViewPlan, PhysicalProperties.ANY);
         viewContext.keepOrShowPlanProcess(parentContext.showPlanProcess(), () -> {
-            viewContext.newAnalyzer(true, customTableResolver).analyze();
+            viewContext.newAnalyzer(customTableResolver).analyze();
         });
         parentContext.addPlanProcesses(viewContext.getPlanProcesses());
         // we should remove all group expression of the plan which in other memo, so the groupId would not conflict
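
Note (not part of the patch): a minimal sketch of how the new notTraverseChildrenOf hook is meant to be used by a batch executor, under the assumption that the patched helpers behave as shown above. MyBatchExecutor and its single-rule job list are hypothetical; notTraverseChildrenOf, jobs, and topDown are the static helpers from AbstractBatchJobExecutor in this patch.

    // Hypothetical usage example; only the helpers added in this patch are real.
    import java.util.List;

    import com.google.common.collect.ImmutableSet;

    import org.apache.doris.nereids.CascadesContext;
    import org.apache.doris.nereids.jobs.executor.AbstractBatchJobExecutor;
    import org.apache.doris.nereids.jobs.rewrite.RewriteJob;
    import org.apache.doris.nereids.rules.rewrite.MergeProjects;
    import org.apache.doris.nereids.trees.plans.logical.LogicalCTEAnchor;

    public class MyBatchExecutor extends AbstractBatchJobExecutor {

        // The supplier runs while the thread-local NOT_TRAVERSE_CHILDREN set is active,
        // so every rewrite job built inside it captures a Predicate<Plan> that returns
        // false for LogicalCTEAnchor; those jobs then skip that node's children,
        // mirroring Analyzer.buildAnalyzeJobs and the Rewriter constants above.
        public static final List<RewriteJob> MY_JOBS = notTraverseChildrenOf(
                ImmutableSet.of(LogicalCTEAnchor.class),
                () -> jobs(
                        topDown(new MergeProjects())
                )
        );

        public MyBatchExecutor(CascadesContext cascadesContext) {
            super(cascadesContext);
        }

        @Override
        public List<RewriteJob> getJobs() {
            return MY_JOBS;
        }
    }

Because the set is stashed in a ThreadLocal only for the duration of the supplier, the traversal policy is fixed at job-construction time rather than at execution time, which is what lets the shared static job lists (ANALYZE_JOBS, the CTE_CHILDREN_REWRITE_JOBS_* constants) carry their own policy without threading a parameter through every rule factory.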