[fix](Nereids) offset applied more than once when there is a shuffle after limit #42576

Merged · 1 commit · Oct 28, 2024
@@ -325,9 +325,6 @@ public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? extends Plan> d
                    .collect(Collectors.toList());
            keys.addAll(validOutputIds);
            validOutputIds = keys;
-       } else if (child instanceof PhysicalLimit && ((PhysicalLimit<?>) child).getPhase().isGlobal()) {
-           // because sort already contains Offset, we don't need to handle PhysicalTopN
-           exchangeNode.setOffset(((PhysicalLimit<?>) child).getOffset());
        }
        if (inputFragment instanceof MultiCastPlanFragment) {
            // TODO: remove this logic when we split to multi-window in logical window to physical window conversion
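A note on the removed branch above: as the PR title describes, when a shuffle sits above a global limit, also setting the offset on the exchange node meant the offset was applied more than once, so too many rows were dropped. Below is a minimal standalone sketch of that arithmetic (plain Groovy, not Doris internals; the helper names are invented for illustration):

// Standalone sketch: applying the same offset at two operators in a row drops too many rows.
def applyLimitOffset(List rows, int offset, int limit) {
    return rows.drop(offset).take(limit)   // what "limit 1 offset 1" should do, once
}

def skipOffsetAgain(List rows, int offset) {
    return rows.drop(offset)               // the extra skip the exchange node used to add
}

def table = [1, 1]                          // two rows, like test1 in the suite below
def once  = applyLimitOffset(table, 1, 1)   // [1] -> one row, as expected
def twice = skipOffsetAgain(once, 1)        // []  -> zero rows, the reported symptom
assert once == [1]
assert twice == []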
10 changes: 0 additions & 10 deletions regression-test/data/nereids_syntax_p0/test_limit.out

This file was deleted.

29 changes: 23 additions & 6 deletions regression-test/suites/nereids_syntax_p0/test_limit.groovy
@@ -34,6 +34,13 @@ suite("test_limit") {
        result([[1]])
    }

+    test {
+        sql """
+            select * from test1 t1 join (select * from test1 limit 1 offset 1) t2
+        """
+        result([[1,1],[1,1]])
+    }

sql """
drop table if exists row_number_limit_tbl;
"""
@@ -57,22 +64,32 @@ suite("test_limit") {

sql """ INSERT INTO row_number_limit_tbl VALUES (7788, 'SCOTT', 'ANALYST', 7566, '1987-04-19', 3000, 0, 20); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """
-    qt_limit1 """
-        select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t order by k6s limit 1 offset 1;

+    test {
+        sql """
+            select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t order by k6s limit 1 offset 1
+        """
+        rowNum 1
+    }

sql """ truncate table row_number_limit_tbl; """

sql """ INSERT INTO row_number_limit_tbl VALUES (7788, 'SCOTT', 'ANALYST', 7566, '1987-04-19', 3000, 0, 20); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7934, 'MILLER', 'CLERK', 7782, '1982-01-23', 1300, 0, 10); """

-    qt_lmit2 """
-        select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2;
+    test {
+        sql """
+            select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2
+        """
+        rowNum 1
+    }

sql """ set parallel_pipeline_task_num = 1; """
-    qt_lmit3 """
-        select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2;
+    test {
+        sql """
+            select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2
+        """
+        rowNum 1
+    }
}
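The deleted qt_* checks compared the full query output against regression-test/data/nereids_syntax_p0/test_limit.out, which this PR removes; the replacement test { ... rowNum 1 } blocks assert only the returned row count. As a usage note on that same pattern (a hypothetical extra check, not part of this PR), the double-offset regression could also be pinned on the subquery alone: the join test above implies test1 holds two rows, so limit 1 offset 1 over it must leave exactly one row.

// Hypothetical extra check, not in this PR: test1 has two rows, so the
// subquery from the join test must return exactly one row on its own.
test {
    sql """
        select * from test1 limit 1 offset 1
    """
    rowNum 1
}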