
Commit 2a99dda

Remove duplicated test from unnest.slt
1 parent 7eb5a35 commit 2a99dda

File tree

1 file changed: +0 −30 lines


datafusion/sqllogictest/test_files/unnest.slt

Lines changed: 0 additions & 30 deletions
@@ -972,36 +972,6 @@ physical_plan
 11)--------------------BoundedWindowAggExec: wdw=[row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING: Field { name: "row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", data_type: UInt64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }, frame: ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING], mode=[Sorted]
 12)----------------------LazyMemoryExec: partitions=1, batch_generators=[range: start=1, end=5, batch_size=8192]
 
-## Unnest with ordering on unrelated column is preserved
-query TT
-EXPLAIN WITH unnested AS (SELECT
-ROW_NUMBER() OVER () AS generated_id,
-unnest(array[value]) as ar
-FROM range(1,5)) SELECT array_agg(ar) FROM unnested group by generated_id;
-----
-logical_plan
-01)Projection: array_agg(unnested.ar)
-02)--Aggregate: groupBy=[[unnested.generated_id]], aggr=[[array_agg(unnested.ar)]]
-03)----SubqueryAlias: unnested
-04)------Projection: generated_id, __unnest_placeholder(make_array(range().value),depth=1) AS UNNEST(make_array(range().value)) AS ar
-05)--------Unnest: lists[__unnest_placeholder(make_array(range().value))|depth=1] structs[]
-06)----------Projection: row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING AS generated_id, make_array(range().value) AS __unnest_placeholder(make_array(range().value))
-07)------------WindowAggr: windowExpr=[[row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING]]
-08)--------------TableScan: range() projection=[value]
-physical_plan
-01)ProjectionExec: expr=[array_agg(unnested.ar)@1 as array_agg(unnested.ar)]
-02)--AggregateExec: mode=FinalPartitioned, gby=[generated_id@0 as generated_id], aggr=[array_agg(unnested.ar)], ordering_mode=Sorted
-03)----SortExec: expr=[generated_id@0 ASC NULLS LAST], preserve_partitioning=[true]
-04)------CoalesceBatchesExec: target_batch_size=8192
-05)--------RepartitionExec: partitioning=Hash([generated_id@0], 4), input_partitions=4
-06)----------AggregateExec: mode=Partial, gby=[generated_id@0 as generated_id], aggr=[array_agg(unnested.ar)], ordering_mode=Sorted
-07)------------ProjectionExec: expr=[generated_id@0 as generated_id, __unnest_placeholder(make_array(range().value),depth=1)@1 as ar]
-08)--------------UnnestExec
-09)----------------ProjectionExec: expr=[row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING@1 as generated_id, make_array(value@0) as __unnest_placeholder(make_array(range().value))]
-10)------------------RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1
-11)--------------------BoundedWindowAggExec: wdw=[row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING: Field { name: "row_number() ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", data_type: UInt64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }, frame: ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING], mode=[Sorted]
-12)----------------------LazyMemoryExec: partitions=1, batch_generators=[range: start=1, end=5, batch_size=8192]
-
 # Unnest array where data is already ordered by column2 (100, 200, 300, 400)
 statement ok
 COPY (
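
For context, the removed hunk follows DataFusion's sqllogictest (.slt) conventions: a line starting with # is a comment, statement ok runs a statement and only asserts that it succeeds, query TT declares a query returning two text columns, and the ---- separator is followed by the expected output verbatim (here, the EXPLAIN plan text). A minimal, hypothetical sketch of that shape, reusing the range(1,5) source from the removed test; the expected-output lines below are placeholders, not real DataFusion plan text:

# Hypothetical illustration of the .slt layout (not an actual test from unnest.slt);
# replace the placeholder lines with the verbatim EXPLAIN output before use.
query TT
EXPLAIN SELECT unnest(array[value]) FROM range(1,5);
----
logical_plan
<expected logical plan lines, pasted verbatim from the actual EXPLAIN output>
physical_plan
<expected physical plan lines, pasted verbatim from the actual EXPLAIN output>

The README in the datafusion/sqllogictest crate describes how these files are run and how expected output is regenerated.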
