From 9f75a33aa48200ebd663fc93ac517d041637ff4f Mon Sep 17 00:00:00 2001 From: bobhan1 Date: Thu, 16 Nov 2023 22:09:14 +0800 Subject: [PATCH] [regression](partial update) Add row store cases for all existing partial update cases #26924 (#27017) --- .../data/nereids_p0/delete/delete_cte.out | 14 + .../delete/delete_mow_partial_update.out | 46 ++ .../data/nereids_p0/delete/delete_using.out | 14 + .../insert_into_table/partial_update.out | 65 ++ .../partial_update_complex.out | 36 ++ .../partial_update_seq_col.out | Bin 609 -> 1412 bytes .../partial_update/test_partial_update.out | 36 ++ .../test_partial_update_2pc_schema_change.out | 69 ++ .../test_partial_update_default_value.out | 5 + .../test_partial_update_delete.out | 42 ++ .../test_partial_update_delete_sign.out | 28 +- ...tial_update_insert_light_schema_change.out | 37 ++ .../test_partial_update_insert_seq_col.out | Bin 609 -> 1412 bytes ...test_partial_update_native_insert_stmt.out | 65 ++ ...tial_update_native_insert_stmt_complex.out | 36 ++ .../test_partial_update_seq_col.out | Bin 609 -> 1411 bytes .../test_partial_update_seq_col_delete.out | Bin 440 -> 1064 bytes .../test_partial_update_seq_type.out | Bin 1140 -> 3363 bytes .../test_partial_update_seq_type_delete.out | Bin 960 -> 3017 bytes .../test_partial_update_strict_mode.out | 26 + .../test_partial_update_upsert.out | 11 + .../test_partial_update_with_delete_stmt.out | 10 + .../test_partial_update_with_update_stmt.out | 8 + .../nereids_p0/delete/delete_cte.groovy | 187 +++--- .../delete/delete_mow_partial_update.groovy | 180 +++--- .../nereids_p0/delete/delete_using.groovy | 157 ++--- .../insert_into_table/partial_update.groovy | 441 +++++++------ .../partial_update_complex.groovy | 217 ++++--- .../partial_update_seq_col.groovy | 200 +++--- .../partial_update/test_partial_update.groovy | 328 +++++----- ...st_partial_update_2pc_schema_change.groovy | 250 +++---- .../test_partial_update_default_value.groovy | 83 +-- .../test_partial_update_delete.groovy | 163 ++--- .../test_partial_update_delete_sign.groovy | 345 +++++----- ...l_update_insert_light_schema_change.groovy | 611 +++++++++--------- .../test_partial_update_insert_seq_col.groovy | 198 +++--- ...t_partial_update_native_insert_stmt.groovy | 441 +++++++------ ...l_update_native_insert_stmt_complex.groovy | 216 ++++--- .../test_partial_update_seq_col.groovy | 262 ++++---- .../test_partial_update_seq_col_delete.groovy | 177 ++--- .../test_partial_update_seq_type.groovy | 265 ++++---- ...test_partial_update_seq_type_delete.groovy | 275 ++++---- .../test_partial_update_strict_mode.groovy | 393 +++++------ .../test_partial_update_upsert.groovy | 157 ++--- ...est_partial_update_with_delete_stmt.groovy | 273 ++++---- ...est_partial_update_with_update_stmt.groovy | 242 +++---- 46 files changed, 3704 insertions(+), 2905 deletions(-) diff --git a/regression-test/data/nereids_p0/delete/delete_cte.out b/regression-test/data/nereids_p0/delete/delete_cte.out index ac92b46e48de87..2734e5b4a33b33 100644 --- a/regression-test/data/nereids_p0/delete/delete_cte.out +++ b/regression-test/data/nereids_p0/delete/delete_cte.out @@ -13,3 +13,17 @@ 3 \N 6 3 3.0 \N 3 30 3 3 3.0 2000-01-03 +-- !sql -- +1 \N 2 1 1.0 \N +1 10 1 1 1.0 2000-01-01 +2 \N 4 2 2.0 \N +2 20 2 2 2.0 2000-01-02 +3 \N 6 3 3.0 \N +3 30 3 3 3.0 2000-01-03 + +-- !sql -- +2 \N 4 2 2.0 \N +2 20 2 2 2.0 2000-01-02 +3 \N 6 3 3.0 \N +3 30 3 3 3.0 2000-01-03 + diff --git a/regression-test/data/nereids_p0/delete/delete_mow_partial_update.out 
b/regression-test/data/nereids_p0/delete/delete_mow_partial_update.out index 488ad711f4e8ab..787f854bea29bc 100644 --- a/regression-test/data/nereids_p0/delete/delete_mow_partial_update.out +++ b/regression-test/data/nereids_p0/delete/delete_mow_partial_update.out @@ -45,3 +45,49 @@ 4 4 4 4 4 0 5 5 5 5 5 0 +-- !sql -- +1 1 +2 2 +3 3 +4 4 +5 5 + +-- !sql -- +4 4 +5 5 + +-- !sql_skip_delete_predicate -- +4 4 +5 5 + +-- !sql -- +1 \N 1 +1 1 0 +2 \N 1 +2 2 0 +3 \N 1 +3 3 0 +4 4 0 +5 5 0 + +-- !sql -- +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 + +-- !sql -- +4 4 4 4 4 +5 5 5 5 5 + +-- !sql -- +1 \N \N 0 \N 1 +1 1 1 1 1 0 +2 \N \N 0 \N 1 +2 2 2 2 2 0 +3 \N \N 0 \N 1 +3 3 3 3 3 0 +4 4 4 4 4 0 +5 5 5 5 5 0 + diff --git a/regression-test/data/nereids_p0/delete/delete_using.out b/regression-test/data/nereids_p0/delete/delete_using.out index ac92b46e48de87..2734e5b4a33b33 100644 --- a/regression-test/data/nereids_p0/delete/delete_using.out +++ b/regression-test/data/nereids_p0/delete/delete_using.out @@ -13,3 +13,17 @@ 3 \N 6 3 3.0 \N 3 30 3 3 3.0 2000-01-03 +-- !sql -- +1 \N 2 1 1.0 \N +1 10 1 1 1.0 2000-01-01 +2 \N 4 2 2.0 \N +2 20 2 2 2.0 2000-01-02 +3 \N 6 3 3.0 \N +3 30 3 3 3.0 2000-01-03 + +-- !sql -- +2 \N 4 2 2.0 \N +2 20 2 2 2.0 2000-01-02 +3 \N 6 3 3.0 \N +3 30 3 3 3.0 2000-01-03 + diff --git a/regression-test/data/nereids_p0/insert_into_table/partial_update.out b/regression-test/data/nereids_p0/insert_into_table/partial_update.out index d3362b159a1431..160af6eb1684be 100644 --- a/regression-test/data/nereids_p0/insert_into_table/partial_update.out +++ b/regression-test/data/nereids_p0/insert_into_table/partial_update.out @@ -64,3 +64,68 @@ 3 3 3 2 3 4 4 4 1 2 +-- !1 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !1 -- +1 doris 200 123 1 +2 doris2 400 223 1 +4 yixiu 400 \N 4321 + +-- !2 -- +1 doris 1000 123 1 2023-01-01 +2 doris2 2000 223 1 2023-01-01 + +-- !2 -- +1 doris 1000 123 1 2023-01-01 +2 doris2 2600 223 1 2023-07-20 +3 unknown 2500 \N 4321 2022-07-18 + +-- !3 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !3 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !4 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 +3 doris3 5000 34 345 + +-- !4 -- +1 doris 1000 123 1 +3 doris3 5000 34 345 + +-- !5 -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !5 -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !6 -- +1 1 3 4 +2 2 4 5 +3 3 2 3 +4 4 1 2 + +-- !6 -- +1 2 3 4 +2 3 4 5 +3 4 2 3 +4 5 1 2 + +-- !7 -- +1 1 1 3 4 +2 2 2 4 5 +3 3 3 2 3 +4 4 4 1 2 + +-- !7 -- +1 1 1 3 4 +2 2 2 4 5 +3 3 3 2 3 +4 4 4 1 2 + diff --git a/regression-test/data/nereids_p0/insert_into_table/partial_update_complex.out b/regression-test/data/nereids_p0/insert_into_table/partial_update_complex.out index b79e8a8436b434..b38fb38c6b27fd 100644 --- a/regression-test/data/nereids_p0/insert_into_table/partial_update_complex.out +++ b/regression-test/data/nereids_p0/insert_into_table/partial_update_complex.out @@ -35,3 +35,39 @@ -- !complex_delete -- 2 2 2 2.0 2000-01-02 +-- !tbl1 -- +1 1 1 1.0 2000-01-01 +2 2 2 2.0 2000-01-02 +3 3 3 3.0 2000-01-03 + +-- !tbl2 -- +1 10 10 10.0 2000-01-10 +2 20 20 20.0 2000-01-20 +3 30 30 30.0 2000-01-30 +4 4 4 4.0 2000-01-04 +5 5 5 5.0 2000-01-05 + +-- !tbl3 -- +1 +3 +5 + +-- !select_result -- +1 10 1000.0 +3 30 3000.0 +5 5 500.0 + +-- !complex_update -- +1 10 1 1000.0 2000-01-01 +2 2 2 2.0 2000-01-02 +3 30 3 3000.0 2000-01-03 +5 5 \N 500.0 \N + +-- !select_result -- +1 1 +3 1 +5 1 + +-- !complex_delete -- +2 2 2 2.0 2000-01-02 + diff --git 
a/regression-test/data/nereids_p0/insert_into_table/partial_update_seq_col.out b/regression-test/data/nereids_p0/insert_into_table/partial_update_seq_col.out index 01b4341d54bbe5c33c6a5ed44c537ed6f28cce08..e836839c72b403f49ace84f19568f956f8d7b6f8 100644 GIT binary patch delta 322 zcmaFJ(!#wVk!fRmEn_@mOr9SD7_c&eC`N`DPF5hDl3$cr%)rFJ`hNdjkT^2~Yc+^w zVSuP(WnfhVO7Sx=umR;5IDixk#BjpoIE^PeGHPXT8gK%w@rTH>GJ$B2H6R^eYm7kl zXvE|J`9N!+79v>$6v1N^OqSDRass1nBE&KekR=QZtU#whEMo`KrFq$T`Q>>~Ujdy7 Y_6|_L0FVXr5X>a(UV_O^wqjBT0B~$OuK)l5 delta 7 OcmZqSe#o*RkqH0`E&_-E diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update.out index 38ac388846dd00..a241189606316b 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update.out @@ -35,3 +35,39 @@ 3 "stranger" 500 \N 4321 4 "foreigner" 600 \N 4321 +-- !select_default -- +1 doris 200 123 1 +2 doris2 400 223 1 + +-- !partial_update_in_one_stream_load -- +1 doris 444 123 1 +2 doris2 555 223 1 + +-- !partial_update_in_one_stream_load -- +1 doris 1111 123 1 +2 doris2 2222 223 1 + +-- !partial_update_in_one_stream_load -- +1 doris 1111 123 1 +2 doris2 2222 223 1 +3 "stranger" 500 \N 4321 +4 "foreigner" 600 \N 4321 + +-- !partial_update_in_one_stream_load -- +1 doris 1111 123 1 +2 doris2 2222 223 1 +3 "stranger" 500 \N 4321 +4 "foreigner" 600 \N 4321 + +-- !partial_update_in_one_stream_load -- +1 doris 1111 123 1 +2 doris2 2222 223 1 +3 "stranger" 500 \N 4321 +4 "foreigner" 600 \N 4321 + +-- !partial_update_in_one_stream_load -- +1 doris 1111 123 1 +2 doris2 2222 223 1 +3 "stranger" 500 \N 4321 +4 "foreigner" 600 \N 4321 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.out index 9ad9f8333a0117..58f3a56c8acd64 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.out @@ -68,3 +68,72 @@ 8 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N 9 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +-- !sql -- +0 \N \N \N \N \N +1 \N \N \N \N \N +10 \N \N \N \N \N +11 \N \N \N \N \N +12 \N \N \N \N \N +13 \N \N \N \N \N +14 \N \N \N \N \N +15 \N \N \N \N \N +16 \N \N \N \N \N +17 \N \N \N \N \N +18 \N \N \N \N \N +19 \N \N \N \N \N +2 \N \N \N \N \N +20 \N \N \N \N \N +3 \N \N \N \N \N +4 \N \N \N \N \N +5 \N \N \N \N \N +6 \N \N \N \N \N +7 \N \N \N \N \N +8 \N \N \N \N \N +9 \N \N \N \N \N + +-- !sql -- +0 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +1 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +10 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +11 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +12 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +13 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +14 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +15 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +16 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +17 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +18 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +19 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +2 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +20 \N 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +3 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +4 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +5 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +6 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +7 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +8 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +9 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N + +-- !sql -- +0 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +1 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +10 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +11 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +12 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +13 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +14 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +15 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +16 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +17 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +18 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +19 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +2 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +20 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +3 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +4 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +5 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +6 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +7 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +8 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N +9 \N aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \N \N \N + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out index 888cde01118ea2..edd3326a752f97 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out @@ -4,3 +4,8 @@ 2 doris2 400 223 1 3 yixiu 600 4321 4321 +-- !select_default -- +1 doris 200 123 1 +2 doris2 400 223 1 +3 yixiu 600 4321 4321 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out index 3477da40685cc0..0863afd7931780 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out @@ -41,3 +41,45 @@ 4 4 4 4 4 0 5 5 5 5 5 0 +-- !sql -- +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 + +-- !sql -- +4 4 4 4 4 +5 5 5 5 5 + +-- !with_delete_sign -- +1 \N \N 0 \N 1 +1 1 1 1 1 0 +2 \N \N 0 \N 1 +2 2 2 2 2 0 +3 \N \N 0 \N 1 +3 3 3 3 3 0 +4 4 4 4 4 0 +5 5 5 5 5 0 + +-- !sql -- +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 + +-- !sql -- +4 4 4 4 4 +5 5 5 5 5 + +-- !sql -- +1 \N \N \N \N 1 +1 1 1 1 1 0 +2 \N \N \N \N 1 +2 2 2 2 2 0 +3 \N \N \N \N 1 +3 3 3 3 3 0 +4 4 4 4 4 0 +5 5 5 5 5 0 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.out index baf484fd3d854d..f14434b2f9f88c 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.out +++ 
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.out @@ -10,7 +10,7 @@ 2 2 2 2 2 4 4 4 4 4 --- !1 -- +-- !with_delete_sign -- 1 \N \N \N \N 1 1 1 1 1 1 0 2 2 2 2 2 0 @@ -21,9 +21,21 @@ 5 5 5 5 5 0 6 \N \N \N \N 1 +-- !1 -- +1 1 1 + -- !2 -- -2 2 2 2 2 0 -4 4 4 4 4 0 + +-- !3 -- +1 2 \N + +-- !1 -- +1 1 1 1 + +-- !2 -- + +-- !3 -- +1 2 \N \N -- !sql -- 1 1 1 1 1 @@ -36,7 +48,7 @@ 2 2 2 2 2 4 4 4 4 4 --- !1 -- +-- !with_delete_sign -- 1 \N \N \N \N 1 1 1 1 1 1 0 2 2 2 2 2 0 @@ -47,14 +59,6 @@ 5 5 5 5 5 0 6 \N \N \N \N 1 --- !2 -- -1 \N \N \N \N 1 -2 2 2 2 2 0 -3 \N \N \N \N 1 -4 4 4 4 4 0 -5 \N \N \N \N 1 -6 \N \N \N \N 1 - -- !1 -- 1 1 1 diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.out index d7e0e49e58b000..c45b54efe91637 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.out @@ -36,3 +36,40 @@ -- !create_index_2 -- 1 1 1 0 0 0 0 0 0 0 +-- !add_value_col_1 -- +1 0 0 0 0 0 0 0 0 0 + +-- !add_value_col_2 -- +1 1 1 0 0 0 0 0 0 0 0 + +-- !add_value_col_3 -- +1 1 1 0 0 0 0 0 0 0 10 + +-- !delete_value_col_1 -- +1 0 0 0 0 0 0 0 0 0 + +-- !delete_value_col_2 -- +1 1 1 0 0 0 0 0 0 + +-- !delete_seq_col_1 -- +1 10 10 10 +2 20 20 20 + +-- !update_value_col_1 -- +1 0 0 0 0 0 0 0 0 0 + +-- !update_value_col_2 -- +1 1 1.0 0 0 0 0 0 0 0 + +-- !add_key_col_1 -- +1 + +-- !add_key_col_2 -- +1 0 10 \N + +-- !create_index_1 -- +1 0 0 0 0 0 0 0 0 0 + +-- !create_index_2 -- +1 1 1 0 0 0 0 0 0 0 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.out index 01b4341d54bbe5c33c6a5ed44c537ed6f28cce08..e836839c72b403f49ace84f19568f956f8d7b6f8 100644 GIT binary patch delta 322 zcmaFJ(!#wVk!fRmEn_@mOr9SD7_c&eC`N`DPF5hDl3$cr%)rFJ`hNdjkT^2~Yc+^w zVSuP(WnfhVO7Sx=umR;5IDixk#BjpoIE^PeGHPXT8gK%w@rTH>GJ$B2H6R^eYm7kl zXvE|J`9N!+79v>$6v1N^OqSDRass1nBE&KekR=QZtU#whEMo`KrFq$T`Q>>~Ujdy7 Y_6|_L0FVXr5X>a(UV_O^wqjBT0B~$OuK)l5 delta 7 OcmZqSe#o*RkqH0`E&_-E diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out index d3362b159a1431..160af6eb1684be 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out @@ -64,3 +64,68 @@ 3 3 3 2 3 4 4 4 1 2 +-- !1 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !1 -- +1 doris 200 123 1 +2 doris2 400 223 1 +4 yixiu 400 \N 4321 + +-- !2 -- +1 doris 1000 123 1 2023-01-01 +2 doris2 2000 223 1 2023-01-01 + +-- !2 -- +1 doris 1000 123 1 2023-01-01 +2 doris2 2600 223 1 2023-07-20 +3 unknown 2500 \N 4321 2022-07-18 + +-- !3 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !3 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !4 -- +1 doris 1000 123 1 +2 doris2 2000 223 1 +3 doris3 5000 34 345 + +-- !4 -- +1 doris 1000 123 1 +3 doris3 5000 34 345 + +-- !5 -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !5 -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !6 -- +1 
1 3 4 +2 2 4 5 +3 3 2 3 +4 4 1 2 + +-- !6 -- +1 2 3 4 +2 3 4 5 +3 4 2 3 +4 5 1 2 + +-- !7 -- +1 1 1 3 4 +2 2 2 4 5 +3 3 3 2 3 +4 4 4 1 2 + +-- !7 -- +1 1 1 3 4 +2 2 2 4 5 +3 3 3 2 3 +4 4 4 1 2 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.out index b79e8a8436b434..b38fb38c6b27fd 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.out @@ -35,3 +35,39 @@ -- !complex_delete -- 2 2 2 2.0 2000-01-02 +-- !tbl1 -- +1 1 1 1.0 2000-01-01 +2 2 2 2.0 2000-01-02 +3 3 3 3.0 2000-01-03 + +-- !tbl2 -- +1 10 10 10.0 2000-01-10 +2 20 20 20.0 2000-01-20 +3 30 30 30.0 2000-01-30 +4 4 4 4.0 2000-01-04 +5 5 5 5.0 2000-01-05 + +-- !tbl3 -- +1 +3 +5 + +-- !select_result -- +1 10 1000.0 +3 30 3000.0 +5 5 500.0 + +-- !complex_update -- +1 10 1 1000.0 2000-01-01 +2 2 2 2.0 2000-01-02 +3 30 3 3000.0 2000-01-03 +5 5 \N 500.0 \N + +-- !select_result -- +1 1 +3 1 +5 1 + +-- !complex_delete -- +2 2 2 2.0 2000-01-02 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col.out index a789f18216e7de159bfd0b36ed2569b20c4654e3..f4dde3a0f7aafabf30aa2c96c37383d62f778f30 100644 GIT binary patch delta 321 zcmaFJ(#*Xfk!fRmEn_@mOr9SD7_c&eC`N`DPF5hDl3$cr%)rFJ`hNdjkT^2~Yc+^w zVSuP(WnfhVO7Sx=umR;5IDixk#BjpoIE^PeGHPXT8gK%w@rTH>GJ$B2H6R^eYm7kl zXvE|J`9N!+79v>$6v1N^OqSDRass1n0>m;8kR=QZtU#whEMo`KrFq$T`Q>>~Ux9oH Yv;?SK0LTLR2WAjvVa+f Z1wglw=><*`P9p;&BV7Y?T_Xc7E&wkMLP7ul delta 7 OcmZ3%v4eTT4n_bA^a9lY diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type.out index d22b34bcc1c02505f84c3d9283d5d0ba60ead5c1..e8e4cf50da639b496c6fe9cd0c078641ad0eb8d7 100644 GIT binary patch literal 3363 zcmeH}Uyjo-5XRGmz@`t&9>E;ItHf^0?kfiXgm^)!6r#qZ5g}QcMAg2)$q+}{EAWl; z&)d>8Vbwk$qN;8^_W0+S&*N~;KCGgQEu&a5v_X-lUxPea1aW-FKC48ngItAI>|H6NiY6=E)mNKlX^4ku`U0w7Ps+ zXO?~c>*qQCKWTnFwaSsU?t%G*Wj%x4I!9AKbEh8Pzw7|zZ8_gQqn1#}qeccF5$fEZBH0P+t0@z~f_3Gy6mrR^#z)MK&$;Sya4%li%*m@tuo^3PVy z<4dY{T|TL#x~gbDNzuV84_YnoR#ZQ_MMV>ia!qN|8k6<3aQ*1YdedP%d;tNux5Y;Y;7Lw68s`dq*4Dm?30(VZ* zcx&q|n!0IG6-V~j{@wl0zA{F?q*+B*S*|IxNo~p>iOH5pp5M^#y3p&yXmvrqmNl)? 
zvd$H~D#{Jrq=}(7(5W&-vz7haB!!`-q*`Ur!m?Aw=(|eidTC;%S4o{4%Gi_#s$6GP zz{4;MxR3!4L?|Q+IsB%g85Ph6!{Y9+UGq9wn=HxW`dR^Ayva-&SNdmMEz7mWE$+99 z(@ZH{G(2^IP8ETWxFx=3QdlZLiWt8HL}>>AuQQf&G@kXw{1RQTz%szmsoWYj&N#+G zuuw4mp;;MR^)nk?+1PmLLbyNhUe?1uMfq<5!ynMH%1GZYvq@ zW7Vh-^=mdNV638DTCfwd-ndmkSYtVlTooQo+sW`S*g3$${%*<)f<@z~_Y6}8LJMam zfR$n3R%l=R0UIrfPsBcx5jG>T2;M;3o;PwrCjb8aIfb6__Uo1XI)}cA{Q`rxguEu> z5jlmXdlo@AW7sivkWO%|JxJ$g&<{Z0U=KoW@x1rY;51+va7#EW;{z?**H8!O$#gwjq#c?= Xx}IT;YRcJe-v9DsS(}#IKLY;&(q)Yl delta 7 OcmX>pet><$0cHRVe*+r; diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.out index c3d0ac7e001dfb..aa1946a5e41462 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.out @@ -25,3 +25,29 @@ 1 kevin 18 shenzhen 400 2023-07-01T12:00 3 steve 23 beijing 500 2023-07-03T12:00:02 +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 500 2023-07-03T12:00:01 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 +3 steve 23 beijing 500 2023-07-03T12:00:02 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 +3 steve 23 beijing 500 2023-07-03T12:00:02 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_upsert.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_upsert.out index e4333002db6f24..7117084a47be88 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_upsert.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_upsert.out @@ -10,3 +10,14 @@ -- !sql -- 1 kevin 18 shenzhen 400 2023-07-01T12:00 +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + +-- !sql -- +1 kevin 18 shenzhen 500 2023-07-03T12:00:01 +3 \N 20 beijing 23 2023-07-03T12:00:02 +18 \N 20 beijing 9999999 2023-07-03T12:00:03 + +-- !sql -- +1 kevin 18 shenzhen 400 2023-07-01T12:00 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.out index cd105da2043132..ba5060cf8c53cc 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.out @@ -9,3 +9,13 @@ -- !select_join -- 1 doris 1000 123 1 +-- !select_default -- +1 doris 1000 123 1 +2 doris2 2000 223 1 + +-- !select_before_delete -- +2 + +-- !select_join -- +1 doris 1000 123 1 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.out index 8461ca7a59a075..43c2e32104aec2 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.out @@ -7,3 +7,11 @@ 1 doris 4000 123 1 2 doris4 8000 223 1 +-- !select_default -- +1 doris 4000 123 1 +2 doris2 
2000 223 1 + +-- !select_join -- +1 doris 4000 123 1 +2 doris4 8000 223 1 + diff --git a/regression-test/suites/nereids_p0/delete/delete_cte.groovy b/regression-test/suites/nereids_p0/delete/delete_cte.groovy index 1fc61b7ca5eda8..080c760b51e5ad 100644 --- a/regression-test/suites/nereids_p0/delete/delete_cte.groovy +++ b/regression-test/suites/nereids_p0/delete/delete_cte.groovy @@ -16,92 +16,103 @@ // under the License. suite('nereids_delete_cte') { - def t1 = 't1_cte' - def t2 = 't2_cte' - def t3 = 't3_cte' - - sql "drop table if exists ${t1}" - sql """ - create table ${t1} ( - id int, - id1 int, - c1 bigint, - c2 string, - c3 double, - c4 date - ) unique key (id, id1) - distributed by hash(id, id1) - properties( - "replication_num"="1", - "enable_unique_key_merge_on_write" = "true" - ); - """ - - sql "drop table if exists ${t2}" - sql """ - create table ${t2} ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date - ) unique key (id) - distributed by hash(id) - properties( - "replication_num"="1" - ); - """ - - sql "drop table if exists ${t3}" - sql """ - create table ${t3} ( - id int - ) distributed by hash(id) - properties( - "replication_num"="1" - ); - """ - - sql """ - INSERT INTO ${t1} VALUES - (1, 10, 1, '1', 1.0, '2000-01-01'), - (2, 20, 2, '2', 2.0, '2000-01-02'), - (3, 30, 3, '3', 3.0, '2000-01-03'); - """ - - sql """ - - INSERT INTO ${t2} VALUES - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05'); - """ - - sql """ - INSERT INTO ${t3} VALUES - (1), - (4), - (5); - """ - - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - sql "set enable_nereids_dml=true" - - sql "insert into ${t1}(id, c1, c2, c3) select id, c1 * 2, c2, c3 from ${t1}" - sql "insert into ${t2}(id, c1, c2, c3) select id, c1, c2 * 2, c3 from ${t2}" - sql "insert into ${t2}(c1, c3) select c1 + 1, c3 + 1 from (select id, c1, c3 from ${t1} order by id, c1 limit 10) ${t1}, ${t3}" - - qt_sql "select * from ${t1} order by id, id1" - - sql """ - with cte as (select * from ${t3}) - delete from ${t1} - using ${t2} join cte on ${t2}.id = cte.id - where ${t1}.id = ${t2}.id; - """ - - qt_sql "select * from ${t1} order by id, id1" + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def t1 = 't1_cte' + def t2 = 't2_cte' + def t3 = 't3_cte' + + sql "drop table if exists ${t1}" + sql """ + create table ${t1} ( + id int, + id1 int, + c1 bigint, + c2 string, + c3 double, + c4 date + ) unique key (id, id1) + distributed by hash(id, id1) + properties( + "replication_num"="1", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "drop table if exists ${t2}" + sql """ + create table ${t2} ( + id int, + c1 bigint, + c2 string, + c3 double, + c4 date + ) unique key (id) + distributed by hash(id) + properties( + "replication_num"="1" + ); + """ + + sql "drop table if exists ${t3}" + sql """ + create table ${t3} ( + id int + ) distributed by hash(id) + properties( + "replication_num"="1" + ); + """ + + sql """ + INSERT INTO ${t1} VALUES + (1, 10, 1, '1', 1.0, '2000-01-01'), + (2, 20, 2, '2', 2.0, '2000-01-02'), + (3, 30, 3, 
'3', 3.0, '2000-01-03'); + """ + + sql """ + + INSERT INTO ${t2} VALUES + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05'); + """ + + sql """ + INSERT INTO ${t3} VALUES + (1), + (4), + (5); + """ + + sql "set enable_nereids_planner=true" + sql "set enable_fallback_to_original_planner=false" + sql "set enable_nereids_dml=true" + + sql "insert into ${t1}(id, c1, c2, c3) select id, c1 * 2, c2, c3 from ${t1}" + sql "insert into ${t2}(id, c1, c2, c3) select id, c1, c2 * 2, c3 from ${t2}" + sql "insert into ${t2}(c1, c3) select c1 + 1, c3 + 1 from (select id, c1, c3 from ${t1} order by id, c1 limit 10) ${t1}, ${t3}" + + qt_sql "select * from ${t1} order by id, id1" + + sql """ + with cte as (select * from ${t3}) + delete from ${t1} + using ${t2} join cte on ${t2}.id = cte.id + where ${t1}.id = ${t2}.id; + """ + + qt_sql "select * from ${t1} order by id, id1" + } + } } \ No newline at end of file diff --git a/regression-test/suites/nereids_p0/delete/delete_mow_partial_update.groovy b/regression-test/suites/nereids_p0/delete/delete_mow_partial_update.groovy index f510724629da02..bfb27ce14ba7b3 100644 --- a/regression-test/suites/nereids_p0/delete/delete_mow_partial_update.groovy +++ b/regression-test/suites/nereids_p0/delete/delete_mow_partial_update.groovy @@ -16,97 +16,109 @@ // under the License. suite('nereids_delete_mow_partial_update') { - sql 'set enable_nereids_planner=true' - sql 'set enable_fallback_to_original_planner=false' - sql "set experimental_enable_nereids_planner=true;" - sql 'set enable_nereids_dml=true' - - sql "sync" - def tableName1 = "nereids_delete_mow_partial_update1" - sql "DROP TABLE IF EXISTS ${tableName1};" + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( - `uid` BIGINT NULL, - `v1` BIGINT NULL - )UNIQUE KEY(uid) - DISTRIBUTED BY HASH(uid) BUCKETS 3 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" - def tableName2 = "nereids_delete_mow_partial_update2" - sql "DROP TABLE IF EXISTS ${tableName2};" + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( - `uid` BIGINT NULL - ) UNIQUE KEY(uid) - DISTRIBUTED BY HASH(uid) BUCKETS 3 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - sql "insert into ${tableName1} values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5);" - qt_sql "select * from ${tableName1} order by uid;" - sql "insert into ${tableName2} values(1), (2), (3);" - sql "delete from ${tableName1} A using ${tableName2} B where A.uid=B.uid;" - qt_sql "select * from ${tableName1} order by uid;" - // when using parital update insert stmt for delete stmt, it will use delete bitmap or delete sign rather than - // delete predicate to "delete" the rows - sql "set skip_delete_predicate=true;" - sql "sync" - qt_sql_skip_delete_predicate "select * from ${tableName1} order by uid;" + sql 'set enable_nereids_planner=true' + sql 'set enable_fallback_to_original_planner=false' + sql "set experimental_enable_nereids_planner=true;" + sql 'set 
enable_nereids_dml=true' + + sql "sync" - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - sql "sync" - qt_sql "select uid, v1, __DORIS_DELETE_SIGN__ from ${tableName1} order by uid, v1, __DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName1};" - sql "drop table if exists ${tableName2};" + def tableName1 = "nereids_delete_mow_partial_update1" + sql "DROP TABLE IF EXISTS ${tableName1};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( + `uid` BIGINT NULL, + `v1` BIGINT NULL + )UNIQUE KEY(uid) + DISTRIBUTED BY HASH(uid) BUCKETS 3 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - sql "sync" - def tableName3 = "test_partial_update_delete3" - sql "DROP TABLE IF EXISTS ${tableName3};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int NOT NULL, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" - sql "insert into ${tableName3} values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5);" - qt_sql "select k1, c1, c2, c3, c4 from ${tableName3} order by k1, c1, c2, c3, c4;" - streamLoad { - table "${tableName3}" + def tableName2 = "nereids_delete_mow_partial_update2" + sql "DROP TABLE IF EXISTS ${tableName2};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( + `uid` BIGINT NULL + ) UNIQUE KEY(uid) + DISTRIBUTED BY HASH(uid) BUCKETS 3 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'k1' - set 'partial_columns', 'true' - set 'merge_type', 'DELETE' + sql "insert into ${tableName1} values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5);" + qt_sql "select * from ${tableName1} order by uid;" + sql "insert into ${tableName2} values(1), (2), (3);" + sql "delete from ${tableName1} A using ${tableName2} B where A.uid=B.uid;" + qt_sql "select * from ${tableName1} order by uid;" + // when using parital update insert stmt for delete stmt, it will use delete bitmap or delete sign rather than + // delete predicate to "delete" the rows + sql "set skip_delete_predicate=true;" + sql "sync" + qt_sql_skip_delete_predicate "select * from ${tableName1} order by uid;" - file 'partial_update_delete.csv' - time 10000 + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + sql "sync" + qt_sql "select uid, v1, __DORIS_DELETE_SIGN__ from ${tableName1} order by uid, v1, __DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName1};" + sql "drop table if exists ${tableName2};" + + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + sql "sync" + def tableName3 = "test_partial_update_delete3" + sql "DROP TABLE IF EXISTS ${tableName3};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + `c3` int NOT NULL, + `c4` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + 
"enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName3} values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5);" + qt_sql "select k1, c1, c2, c3, c4 from ${tableName3} order by k1, c1, c2, c3, c4;" + streamLoad { + table "${tableName3}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k1' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + + file 'partial_update_delete.csv' + time 10000 + } + sql "sync" + qt_sql "select k1, c1, c2, c3, c4 from ${tableName3} order by k1, c1, c2, c3, c4;" + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + sql "sync" + qt_sql "select k1, c1, c2, c3, c4, __DORIS_DELETE_SIGN__ from ${tableName3} order by k1, c1, c2, c3, c4, __DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName3};" + } } - sql "sync" - qt_sql "select k1, c1, c2, c3, c4 from ${tableName3} order by k1, c1, c2, c3, c4;" - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - sql "sync" - qt_sql "select k1, c1, c2, c3, c4, __DORIS_DELETE_SIGN__ from ${tableName3} order by k1, c1, c2, c3, c4, __DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName3};" } diff --git a/regression-test/suites/nereids_p0/delete/delete_using.groovy b/regression-test/suites/nereids_p0/delete/delete_using.groovy index bc5a29430c5b3e..a4f9639c57fd3c 100644 --- a/regression-test/suites/nereids_p0/delete/delete_using.groovy +++ b/regression-test/suites/nereids_p0/delete/delete_using.groovy @@ -16,87 +16,98 @@ // under the License. suite('nereids_delete_using') { - sql 'drop table if exists t1' - sql ''' - create table t1 ( - id int, - id1 int, - c1 bigint, - c2 string, - c3 double, - c4 date - ) unique key (id, id1) - distributed by hash(id, id1) - properties( - 'replication_num'='1', - "enable_unique_key_merge_on_write" = "true" - ); - ''' - sql 'drop table if exists t2' - sql ''' - create table t2 ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date - ) unique key (id) - distributed by hash(id) - properties( - 'replication_num'='1' - ); - ''' + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - sql 'drop table if exists t3' - sql ''' - create table t3 ( - id int - ) distributed by hash(id) - properties( - 'replication_num'='1' - ); - ''' + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - sql ''' - INSERT INTO t1 VALUES - (1, 10, 1, '1', 1.0, '2000-01-01'), - (2, 20, 2, '2', 2.0, '2000-01-02'), - (3, 30, 3, '3', 3.0, '2000-01-03'); - ''' + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - sql ''' + sql 'drop table if exists t1' + sql """ + create table t1 ( + id int, + id1 int, + c1 bigint, + c2 string, + c3 double, + c4 date + ) unique key (id, id1) + distributed by hash(id, id1) + properties( + 'replication_num'='1', + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ - INSERT INTO t2 VALUES - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05'); - ''' + sql 'drop table if exists t2' + sql ''' + create table t2 ( + id 
int, + c1 bigint, + c2 string, + c3 double, + c4 date + ) unique key (id) + distributed by hash(id) + properties( + 'replication_num'='1' + ); + ''' - sql ''' - INSERT INTO t3 VALUES - (1), - (4), - (5); - ''' - - sql 'set enable_nereids_planner=true' - sql 'set enable_fallback_to_original_planner=false' - sql 'set enable_nereids_dml=true' + sql 'drop table if exists t3' + sql ''' + create table t3 ( + id int + ) distributed by hash(id) + properties( + 'replication_num'='1' + ); + ''' - sql 'insert into t1(id, c1, c2, c3) select id, c1 * 2, c2, c3 from t1' - sql 'insert into t2(id, c1, c2, c3) select id, c1, c2 * 2, c3 from t2' - sql 'insert into t2(c1, c3) select c1 + 1, c3 + 1 from (select id, c1, c3 from t1 order by id, c1 limit 10) t1, t3' + sql ''' + INSERT INTO t1 VALUES + (1, 10, 1, '1', 1.0, '2000-01-01'), + (2, 20, 2, '2', 2.0, '2000-01-02'), + (3, 30, 3, '3', 3.0, '2000-01-03'); + ''' - qt_sql 'select * from t1 order by id, id1' + sql ''' - sql ''' - delete from t1 - using t2 join t3 on t2.id = t3.id - where t1.id = t2.id; - ''' + INSERT INTO t2 VALUES + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05'); + ''' - qt_sql 'select * from t1 order by id, id1' + sql ''' + INSERT INTO t3 VALUES + (1), + (4), + (5); + ''' + + sql 'set enable_nereids_planner=true' + sql 'set enable_fallback_to_original_planner=false' + sql 'set enable_nereids_dml=true' + + sql 'insert into t1(id, c1, c2, c3) select id, c1 * 2, c2, c3 from t1' + sql 'insert into t2(id, c1, c2, c3) select id, c1, c2 * 2, c3 from t2' + sql 'insert into t2(c1, c3) select c1 + 1, c3 + 1 from (select id, c1, c3 from t1 order by id, c1 limit 10) t1, t3' + + qt_sql 'select * from t1 order by id, id1' + + sql ''' + delete from t1 + using t2 join t3 on t2.id = t3.id + where t1.id = t2.id; + ''' + + qt_sql 'select * from t1 order by id, id1' + } + } } diff --git a/regression-test/suites/nereids_p0/insert_into_table/partial_update.groovy b/regression-test/suites/nereids_p0/insert_into_table/partial_update.groovy index 699c7c600c553e..5fcf4e63deeb5c 100644 --- a/regression-test/suites/nereids_p0/insert_into_table/partial_update.groovy +++ b/regression-test/suites/nereids_p0/insert_into_table/partial_update.groovy @@ -17,216 +17,235 @@ // under the License. 
suite("nereids_partial_update_native_insert_stmt", "p0") { - sql "set enable_nereids_dml=true;" - sql "set experimental_enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" - sql "sync;" - - def tableName = "nereids_partial_update_native_insert_stmt" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1)""" - qt_1 """ select * from ${tableName} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update using insert stmt in non-strict mode, - // existing rows should be updated and new rows should be inserted with unmentioned columns filled with default or null value - sql """insert into ${tableName}(id,score) values(2,400),(1,200),(4,400)""" - qt_1 """ select * from ${tableName} order by id; """ - test { - sql """insert into ${tableName} values(2,400),(1,200),(4,400)""" - exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." - } - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "nereids_partial_update_native_insert_stmt2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - )""" - sql """ insert into ${tableName2} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01');""" - qt_2 "select * from ${tableName2} order by id;" - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update with seq col - sql """ insert into ${tableName2}(id,score,update_time) values - (2,2500,"2023-07-19"), - (2,2600,"2023-07-20"), - (1,1300,"2022-07-19"), - (3,1500,"2022-07-20"), - (3,2500,"2022-07-18"); """ - qt_2 "select * from ${tableName2} order by id;" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName2}; """ - - - def tableName3 = "nereids_partial_update_native_insert_stmt3" - sql """ DROP TABLE IF EXISTS ${tableName3}; """ - sql """ - CREATE TABLE ${tableName3} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName3} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1);""" - qt_3 """ select * 
from ${tableName3} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // in partial update, the unmentioned columns should have default values or be nullable - // but field `name` is not nullable and doesn't have default value - test { - sql """insert into ${tableName3}(id,score) values(2,400),(1,200),(4,400)""" - exception "INTERNAL_ERROR" - } - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - qt_3 """ select * from ${tableName3} order by id; """ - sql """ DROP TABLE IF EXISTS ${tableName3} """ - - - def tableName4 = "nereids_partial_update_native_insert_stmt4" - sql """ DROP TABLE IF EXISTS ${tableName4} """ - sql """ - CREATE TABLE ${tableName4} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", - `score` int(11) NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName4} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1),(3,"doris3",5000,34,345);""" - qt_4 """ select * from ${tableName4} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update with delete sign - sql "insert into ${tableName4}(id,__DORIS_DELETE_SIGN__) values(2,1);" - qt_4 """ select * from ${tableName4} order by id; """ - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName4} """ - - - def tableName5 = "nereids_partial_update_native_insert_stmt5" - sql """ DROP TABLE IF EXISTS ${tableName5} """ - sql """ - CREATE TABLE ${tableName5} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS AUTO PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName5} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_5 """select * from ${tableName5} order by id;""" - sql "set enable_insert_strict = true;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - // partial update using insert stmt in strict mode, the max_filter_ratio is always 0 - test { - sql """ insert into ${tableName5}(id,balance,last_access_time) values(1,500,"2023-07-03 12:00:01"),(3,23,"2023-07-03 12:00:02"),(18,9999999,"2023-07-03 12:00:03"); """ - exception "Insert has filtered data in strict mode" - } - qt_5 """select * from ${tableName5} order by id;""" - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName5}; """ - - def tableName6 = "nereids_partial_update_native_insert_stmt6" - sql """ DROP TABLE IF EXISTS ${tableName6} """ - sql """create table ${tableName6} ( - k int null, - v int null, - v2 int null, - v3 int null - ) unique key (k) distributed by hash(k) buckets 1 - properties("replication_num" = "1", - "enable_unique_key_merge_on_write"="true", - 
"disable_auto_compaction"="true"); """ - sql "insert into ${tableName6} values(1,1,3,4),(2,2,4,5),(3,3,2,3),(4,4,1,2);" - qt_6 "select * from ${tableName6} order by k;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName6}(k,v) select v2,v3 from ${tableName6};" - qt_6 "select * from ${tableName6} order by k;" - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName6}; """ - - def tableName7 = "nereids_partial_update_native_insert_stmt7" - sql """ DROP TABLE IF EXISTS ${tableName7} """ - sql """create table ${tableName7} ( - k1 int null, - k2 int null, - k3 int null, - v1 int null, - v2 int null - ) unique key (k1,k2,k3) distributed by hash(k1,k2) buckets 4 - properties("replication_num" = "1", - "enable_unique_key_merge_on_write"="true", - "disable_auto_compaction"="true"); """ - sql "insert into ${tableName7} values(1,1,1,3,4),(2,2,2,4,5),(3,3,3,2,3),(4,4,4,1,2);" - qt_7 "select * from ${tableName7} order by k1;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - test { - sql "insert into ${tableName7}(k1,k2,v2) select k2,k3,v1 from ${tableName7};" - exception "Partial update should include all key columns, missing: k3" + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=true;" + sql "set experimental_enable_nereids_planner=true;" + sql "set enable_fallback_to_original_planner=false;" + sql "sync;" + + def tableName = "nereids_partial_update_native_insert_stmt" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1)""" + qt_1 """ select * from ${tableName} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // partial update using insert stmt in non-strict mode, + // existing rows should be updated and new rows should be inserted with unmentioned columns filled with default or null value + sql """insert into ${tableName}(id,score) values(2,400),(1,200),(4,400)""" + qt_1 """ select * from ${tableName} order by id; """ + test { + sql """insert into ${tableName} values(2,400),(1,200),(4,400)""" + exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." 
+ } + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + def tableName2 = "nereids_partial_update_native_insert_stmt2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + + sql """ insert into ${tableName2} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01');""" + qt_2 "select * from ${tableName2} order by id;" + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // partial update with seq col + sql """ insert into ${tableName2}(id,score,update_time) values + (2,2500,"2023-07-19"), + (2,2600,"2023-07-20"), + (1,1300,"2022-07-19"), + (3,1500,"2022-07-20"), + (3,2500,"2022-07-18"); """ + qt_2 "select * from ${tableName2} order by id;" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName2}; """ + + + def tableName3 = "nereids_partial_update_native_insert_stmt3" + sql """ DROP TABLE IF EXISTS ${tableName3}; """ + sql """ + CREATE TABLE ${tableName3} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName3} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1);""" + qt_3 """ select * from ${tableName3} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // in partial update, the unmentioned columns should have default values or be nullable + // but field `name` is not nullable and doesn't have default value + test { + sql """insert into ${tableName3}(id,score) values(2,400),(1,200),(4,400)""" + exception "INTERNAL_ERROR" + } + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + qt_3 """ select * from ${tableName3} order by id; """ + sql """ DROP TABLE IF EXISTS ${tableName3} """ + + + def tableName4 = "nereids_partial_update_native_insert_stmt4" + sql """ DROP TABLE IF EXISTS ${tableName4} """ + sql """ + CREATE TABLE ${tableName4} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", + `score` int(11) NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName4} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1),(3,"doris3",5000,34,345);""" + qt_4 """ select * from ${tableName4} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = 
false;" + sql "sync;" + // partial update with delete sign + sql "insert into ${tableName4}(id,__DORIS_DELETE_SIGN__) values(2,1);" + qt_4 """ select * from ${tableName4} order by id; """ + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName4} """ + + + def tableName5 = "nereids_partial_update_native_insert_stmt5" + sql """ DROP TABLE IF EXISTS ${tableName5} """ + sql """ + CREATE TABLE ${tableName5} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS AUTO PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName5} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_5 """select * from ${tableName5} order by id;""" + sql "set enable_insert_strict = true;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + // partial update using insert stmt in strict mode, the max_filter_ratio is always 0 + test { + sql """ insert into ${tableName5}(id,balance,last_access_time) values(1,500,"2023-07-03 12:00:01"),(3,23,"2023-07-03 12:00:02"),(18,9999999,"2023-07-03 12:00:03"); """ + exception "Insert has filtered data in strict mode" + } + qt_5 """select * from ${tableName5} order by id;""" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName5}; """ + + def tableName6 = "nereids_partial_update_native_insert_stmt6" + sql """ DROP TABLE IF EXISTS ${tableName6} """ + sql """create table ${tableName6} ( + k int null, + v int null, + v2 int null, + v3 int null + ) unique key (k) distributed by hash(k) buckets 1 + properties("replication_num" = "1", + "enable_unique_key_merge_on_write"="true", + "disable_auto_compaction"="true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName6} values(1,1,3,4),(2,2,4,5),(3,3,2,3),(4,4,1,2);" + qt_6 "select * from ${tableName6} order by k;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + sql "insert into ${tableName6}(k,v) select v2,v3 from ${tableName6};" + qt_6 "select * from ${tableName6} order by k;" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName6}; """ + + def tableName7 = "nereids_partial_update_native_insert_stmt7" + sql """ DROP TABLE IF EXISTS ${tableName7} """ + sql """create table ${tableName7} ( + k1 int null, + k2 int null, + k3 int null, + v1 int null, + v2 int null + ) unique key (k1,k2,k3) distributed by hash(k1,k2) buckets 4 + properties("replication_num" = "1", + "enable_unique_key_merge_on_write"="true", + "disable_auto_compaction"="true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName7} values(1,1,1,3,4),(2,2,2,4,5),(3,3,3,2,3),(4,4,4,1,2);" + qt_7 "select * from ${tableName7} order by k1;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + test { + sql "insert into ${tableName7}(k1,k2,v2) select k2,k3,v1 from ${tableName7};" + exception "Partial update should 
include all key columns, missing: k3" + } + qt_7 "select * from ${tableName7} order by k1;" + sql """ DROP TABLE IF EXISTS ${tableName7}; """ + + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "set enable_fallback_to_original_planner=true;" + sql "sync;" + } } - qt_7 "select * from ${tableName7} order by k1;" - sql """ DROP TABLE IF EXISTS ${tableName7}; """ - - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = true;" - sql "set enable_fallback_to_original_planner=true;" - sql "set enable_nereids_dml=false;" - sql "sync;" } diff --git a/regression-test/suites/nereids_p0/insert_into_table/partial_update_complex.groovy b/regression-test/suites/nereids_p0/insert_into_table/partial_update_complex.groovy index 66e237ff89c6e7..f9857e259d14f3 100644 --- a/regression-test/suites/nereids_p0/insert_into_table/partial_update_complex.groovy +++ b/regression-test/suites/nereids_p0/insert_into_table/partial_update_complex.groovy @@ -16,108 +16,119 @@ // under the License. suite("nereids_partial_update_native_insert_stmt_complex", "p0") { - sql "set enable_nereids_dml=true;" - sql "set experimental_enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" - sql "sync;" - - // test complex partial update - def tbName1 = "nereids_partial_update_native_insert_stmt_complex1" - def tbName2 = "nereids_partial_update_native_insert_stmt_complex2" - def tbName3 = "nereids_partial_update_native_insert_stmt_complex3" - - sql "DROP TABLE IF EXISTS ${tbName1}" - sql "DROP TABLE IF EXISTS ${tbName2}" - sql "DROP TABLE IF EXISTS ${tbName3}" - - sql """create table ${tbName1} ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date) unique key (id) distributed by hash(id) - properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true');""" - - sql """create table ${tbName2} ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date) unique key (id) distributed by hash(id) - properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true');""" - - sql """create table ${tbName3} (id int) distributed by hash (id) properties('replication_num'='1');""" - - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """insert into ${tbName1} values - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03');""" - sql """insert into ${tbName2} values - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05');""" - sql """insert into ${tbName3} values(1), (3), (5);""" - - qt_tbl1 "select * from ${tbName1} order by id;" - qt_tbl2 "select * from ${tbName2} order by id;" - qt_tbl3 "select * from ${tbName3} order by id;" - - qt_select_result """select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;""" - - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """insert into ${tbName1}(id, c1, c3) - select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ - - qt_complex_update """select * from ${tbName1} order by id;""" - test { - sql """insert into ${tbName1} - select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; 
""" - exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=true;" + sql "set experimental_enable_nereids_planner=true;" + sql "set enable_fallback_to_original_planner=false;" + sql "sync;" + + // test complex partial update + def tbName1 = "nereids_partial_update_native_insert_stmt_complex1" + def tbName2 = "nereids_partial_update_native_insert_stmt_complex2" + def tbName3 = "nereids_partial_update_native_insert_stmt_complex3" + + sql "DROP TABLE IF EXISTS ${tbName1}" + sql "DROP TABLE IF EXISTS ${tbName2}" + sql "DROP TABLE IF EXISTS ${tbName3}" + + sql """create table ${tbName1} ( + id int, + c1 bigint, + c2 string, + c3 double, + c4 date) unique key (id) distributed by hash(id) + properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true',"store_row_column" = "${use_row_store}"); """ + + sql """create table ${tbName2} ( + id int, + c1 bigint, + c2 string, + c3 double, + c4 date) unique key (id) distributed by hash(id) + properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true',"store_row_column" = "${use_row_store}"); """ + + sql """create table ${tbName3} (id int) distributed by hash (id) properties('replication_num'='1');""" + + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """insert into ${tbName1} values + (1, 1, '1', 1.0, '2000-01-01'), + (2, 2, '2', 2.0, '2000-01-02'), + (3, 3, '3', 3.0, '2000-01-03');""" + sql """insert into ${tbName2} values + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05');""" + sql """insert into ${tbName3} values(1), (3), (5);""" + + qt_tbl1 "select * from ${tbName1} order by id;" + qt_tbl2 "select * from ${tbName2} order by id;" + qt_tbl3 "select * from ${tbName3} order by id;" + + qt_select_result """select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;""" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """insert into ${tbName1}(id, c1, c3) + select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ + + qt_complex_update """select * from ${tbName1} order by id;""" + test { + sql """insert into ${tbName1} + select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ + exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." 
+ } + sql "truncate table ${tbName1};" + sql "truncate table ${tbName2};" + sql "truncate table ${tbName3};" + + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """insert into ${tbName1} values + (1, 1, '1', 1.0, '2000-01-01'), + (2, 2, '2', 2.0, '2000-01-02'), + (3, 3, '3', 3.0, '2000-01-03');""" + sql """insert into ${tbName2} values + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05');""" + sql """insert into ${tbName3} values(1), (3), (5);""" + + qt_select_result "select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ insert into ${tbName1}(id, __DORIS_DELETE_SIGN__) + select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id;""" + + qt_complex_delete """select * from ${tbName1} order by id;""" + + sql "DROP TABLE IF EXISTS ${tbName1}" + sql "DROP TABLE IF EXISTS ${tbName2}" + sql "DROP TABLE IF EXISTS ${tbName3}" + + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "set enable_fallback_to_original_planner=true;" + sql "sync;" + } } - sql "truncate table ${tbName1};" - sql "truncate table ${tbName2};" - sql "truncate table ${tbName3};" - - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """insert into ${tbName1} values - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03');""" - sql """insert into ${tbName2} values - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05');""" - sql """insert into ${tbName3} values(1), (3), (5);""" - - qt_select_result "select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;" - - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ insert into ${tbName1}(id, __DORIS_DELETE_SIGN__) - select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id;""" - - qt_complex_delete """select * from ${tbName1} order by id;""" - - sql "DROP TABLE IF EXISTS ${tbName1}" - sql "DROP TABLE IF EXISTS ${tbName2}" - sql "DROP TABLE IF EXISTS ${tbName3}" - - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = true;" - sql "set enable_fallback_to_original_planner=true;" - sql "set enable_nereids_dml=false;" - sql "sync;" } diff --git a/regression-test/suites/nereids_p0/insert_into_table/partial_update_seq_col.groovy b/regression-test/suites/nereids_p0/insert_into_table/partial_update_seq_col.groovy index 7d4190852f6ee0..622350955e9ef0 100644 --- a/regression-test/suites/nereids_p0/insert_into_table/partial_update_seq_col.groovy +++ b/regression-test/suites/nereids_p0/insert_into_table/partial_update_seq_col.groovy @@ -17,99 +17,111 @@ // under the License. 
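// Editor's note: a hedged sketch, not part of the patch, of the delete-by-join
// pattern the complex suite above just exercised: a partial-update
// INSERT...SELECT that writes 1 into the hidden __DORIS_DELETE_SIGN__ column
// deletes every row whose key matches the join result.
sql "set enable_unique_key_partial_update=true;"
sql """insert into ${tbName1}(id, __DORIS_DELETE_SIGN__)
       select ${tbName2}.id, 1 from ${tbName2}
       inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id;"""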
suite("nereids_partial_update_native_insert_seq_col", "p0") { - sql "set enable_nereids_dml=true;" - sql "set experimental_enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" - sql "sync;" - - def tableName = "nereids_partial_update_native_insert_seq_col" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - ) - """ - sql """ insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01');""" - sql "sync" - - qt_select_default """ select * from ${tableName} order by id;""" - - // set enable_unique_key_partial_update=false, it's a row update - // the input data don't contains sequence mapping column and the sequence mapping - // column's default value is not CURRENT_TIMESTAMP, will load fail - test { - sql "insert into ${tableName}(id,score) values(2,400),(1,200);" - exception "Table nereids_partial_update_native_insert_seq_col has sequence column, need to specify the sequence column" - } - // set enable_unique_key_partial_update=true, should success - // we don't provide the sequence column in input data, so the updated rows - // should use there original sequence column values. - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName}(id,score) values(2,400),(1,200);" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - qt_partial_update_without_seq """ select * from ${tableName} order by id;""" - - // provide the sequence column this time, should update according to the - // given sequence values - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """insert into ${tableName}(id,score,update_time) values - (2,2500,"2023-07-19"), - (2,2600,"2023-07-20"), - (1,1300,"2022-07-19"), - (3,1500,"2022-07-20"), - (3,2500,"2022-07-18");""" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - qt_partial_update_with_seq """ select * from ${tableName} order by id;""" - - sql "SET show_hidden_columns=true" - sql "sync" - - qt_partial_update_with_seq_hidden_columns """select * from ${tableName} order by id;""" - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "nereids_partial_update_native_insert_seq_col2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `score` int(11) NOT NULL COMMENT "用户得分", - `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - )""" - - // don't set enable_unique_key_partial_update, it's a row update - // the input data don't contains sequence mapping column but the sequence mapping - // column's default value is CURRENT_TIMESTAMP, will load successfully - sql "SET show_hidden_columns=false" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql "insert into ${tableName2}(id,score) 
values(2,400),(1,200);" - qt_sql """ select id,score from ${tableName2} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName2}; """ + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=true;" + sql "set experimental_enable_nereids_planner=true;" + sql "set enable_fallback_to_original_planner=false;" + sql "sync;" + + def tableName = "nereids_partial_update_native_insert_seq_col" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + + sql """ insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01');""" + sql "sync" + + qt_select_default """ select * from ${tableName} order by id;""" + + // set enable_unique_key_partial_update=false, it's a row update + // the input data don't contains sequence mapping column and the sequence mapping + // column's default value is not CURRENT_TIMESTAMP, will load fail + test { + sql "insert into ${tableName}(id,score) values(2,400),(1,200);" + exception "Table nereids_partial_update_native_insert_seq_col has sequence column, need to specify the sequence column" + } + + // set enable_unique_key_partial_update=true, should success + // we don't provide the sequence column in input data, so the updated rows + // should use there original sequence column values. 
+ sql "set enable_unique_key_partial_update=true;" + sql "sync;" + sql "insert into ${tableName}(id,score) values(2,400),(1,200);" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + + qt_partial_update_without_seq """ select * from ${tableName} order by id;""" + + // provide the sequence column this time, should update according to the + // given sequence values + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """insert into ${tableName}(id,score,update_time) values + (2,2500,"2023-07-19"), + (2,2600,"2023-07-20"), + (1,1300,"2022-07-19"), + (3,1500,"2022-07-20"), + (3,2500,"2022-07-18");""" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + + qt_partial_update_with_seq """ select * from ${tableName} order by id;""" + + sql "SET show_hidden_columns=true" + sql "sync" + + qt_partial_update_with_seq_hidden_columns """select * from ${tableName} order by id;""" + + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + def tableName2 = "nereids_partial_update_native_insert_seq_col2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `score` int(11) NOT NULL COMMENT "用户得分", + `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + + // don't set enable_unique_key_partial_update, it's a row update + // the input data don't contains sequence mapping column but the sequence mapping + // column's default value is CURRENT_TIMESTAMP, will load successfully + sql "SET show_hidden_columns=false" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql "insert into ${tableName2}(id,score) values(2,400),(1,200);" + qt_sql """ select id,score from ${tableName2} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName2}; """ + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update.groovy index 6af0e869482806..cdf666a051a024 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update.groovy @@ -17,166 +17,176 @@ // under the License. 
suite("test_primary_key_partial_update", "p0") { - def tableName = "test_primary_key_partial_update" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - // insert 2 lines - sql """ - insert into ${tableName} values(2, "doris2", 2000, 223, 1) - """ - - sql """ - insert into ${tableName} values(1, "doris", 1000, 123, 1) - """ - - // skip 3 lines and file have 4 lines - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' - - file 'basic.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // partial update a row multiple times in one stream load - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' - - file 'basic_with_duplicate.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' - - file 'basic_with_duplicate2.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - streamLoad { - table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,name,score' - file 'basic_with_new_keys.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - streamLoad { - table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'false' - set 'columns', 'id,name,score' - - file 'basic_with_new_keys.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - streamLoad { - table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,name,score' - - file 'basic_with_new_keys_and_invalid.csv' - time 10000// limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Fail", json.Status) + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + def tableName = "test_primary_key_partial_update" + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null 
test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + // insert 2 lines + sql """ + insert into ${tableName} values(2, "doris2", 2000, 223, 1) + """ + + sql """ + insert into ${tableName} values(1, "doris", 1000, 123, 1) + """ + + // skip 3 lines and file have 4 lines + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' + + file 'basic.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // partial update a row multiple times in one stream load + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' + + file 'basic_with_duplicate.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' + + file 'basic_with_duplicate2.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,name,score' + + file 'basic_with_new_keys.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'false' + set 'columns', 'id,name,score' + + file 'basic_with_new_keys.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,name,score' + + file 'basic_with_new_keys_and_invalid.csv' + time 10000// limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Fail", json.Status) + } + } + + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' + + file 'basic_invalid.csv' + time 10000// limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Fail", json.Status) + assertTrue(json.Message.contains("[INTERNAL_ERROR]too many filtered rows")) + assertEquals(3, json.NumberTotalRows) + assertEquals(1, json.NumberLoadedRows) + assertEquals(2, json.NumberFilteredRows) + } + } + sql "sync" + qt_partial_update_in_one_stream_load """ + select * from ${tableName} order by id; + """ + + // drop drop + sql """ DROP TABLE IF EXISTS ${tableName} """ } } - - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - streamLoad { - table 
"${tableName}" - set 'column_separator', ',' - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' - - file 'basic_invalid.csv' - time 10000// limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Fail", json.Status) - assertTrue(json.Message.contains("[INTERNAL_ERROR]too many filtered rows")) - assertEquals(3, json.NumberTotalRows) - assertEquals(1, json.NumberLoadedRows) - assertEquals(2, json.NumberFilteredRows) - } - } - sql "sync" - qt_partial_update_in_one_stream_load """ - select * from ${tableName} order by id; - """ - - // drop drop - sql """ DROP TABLE IF EXISTS ${tableName} """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.groovy index f63ebe9a45ecf1..45fa34d60a7357 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_2pc_schema_change.groovy @@ -36,146 +36,154 @@ import org.apache.http.util.EntityUtils suite("test_partial_update_2pc_schema_change", "p0") { - def tableName = "test_partial_update_2pc_schema_change" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ CREATE TABLE ${tableName} ( - k1 varchar(20) not null, - v1 varchar(20), - v2 varchar(20), - v3 varchar(20), - v4 varchar(20), - v5 varchar(20)) - UNIQUE KEY(k1) DISTRIBUTED BY HASH(k1) BUCKETS 4 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true")""" - - - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', "k1" - - file 'concurrency_update3.csv' - time 10000 // limit inflight 10s - } - qt_sql """ select * from ${tableName} order by k1;""" - - - def wait_for_schema_change = { - def try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(10) - if(res[0][9].toString() == "FINISHED"){ - break; + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_partial_update_2pc_schema_change" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + k1 varchar(20) not null, + v1 varchar(20), + v2 varchar(20), + v3 varchar(20), + v4 varchar(20), + v5 varchar(20)) + UNIQUE KEY(k1) DISTRIBUTED BY HASH(k1) BUCKETS 4 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "store_row_column" = "${use_row_store}");""" + + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', "k1" + + file 'concurrency_update3.csv' + time 10000 // limit inflight 10s } - assert(try_times>0) - try_times-- - } - } + qt_sql """ select * from ${tableName} order by k1;""" - InetSocketAddress address = context.config.feHttpInetSocketAddress - String 
user = context.config.feHttpUser - String password = context.config.feHttpPassword - String db = context.config.getDbNameByFile(context.file) - def do_streamload_2pc = { txn_id, txn_operation, name -> - HttpClients.createDefault().withCloseable { client -> - RequestBuilder requestBuilder = RequestBuilder.put("http://${address.hostString}:${address.port}/api/${db}/${name}/_stream_load_2pc") - String encoding = Base64.getEncoder() - .encodeToString((user + ":" + (password == null ? "" : password)).getBytes("UTF-8")) - requestBuilder.setHeader("Authorization", "Basic ${encoding}") - requestBuilder.setHeader("Expect", "100-Continue") - requestBuilder.setHeader("txn_id", "${txn_id}") - requestBuilder.setHeader("txn_operation", "${txn_operation}") - - String backendStreamLoadUri = null - client.execute(requestBuilder.build()).withCloseable { resp -> - resp.withCloseable { - String body = EntityUtils.toString(resp.getEntity()) - def respCode = resp.getStatusLine().getStatusCode() - // should redirect to backend - if (respCode != 307) { - throw new IllegalStateException("Expect frontend stream load response code is 307, " + - "but meet ${respCode}\nbody: ${body}") + def wait_for_schema_change = { + def try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(10) + if(res[0][9].toString() == "FINISHED"){ + break; } - backendStreamLoadUri = resp.getFirstHeader("location").getValue() + assert(try_times>0) + try_times-- } } - requestBuilder.setUri(backendStreamLoadUri) - try{ - client.execute(requestBuilder.build()).withCloseable { resp -> - resp.withCloseable { - String body = EntityUtils.toString(resp.getEntity()) - def respCode = resp.getStatusLine().getStatusCode() - if (respCode != 200) { - throw new IllegalStateException("Expect backend stream load response code is 200, " + - "but meet ${respCode}\nbody: ${body}") + InetSocketAddress address = context.config.feHttpInetSocketAddress + + def do_streamload_2pc = { txn_id, txn_operation, name -> + HttpClients.createDefault().withCloseable { client -> + RequestBuilder requestBuilder = RequestBuilder.put("http://${address.hostString}:${address.port}/api/${db}/${name}/_stream_load_2pc") + String encoding = Base64.getEncoder() + .encodeToString((user + ":" + (password == null ? 
"" : password)).getBytes("UTF-8")) + requestBuilder.setHeader("Authorization", "Basic ${encoding}") + requestBuilder.setHeader("Expect", "100-Continue") + requestBuilder.setHeader("txn_id", "${txn_id}") + requestBuilder.setHeader("txn_operation", "${txn_operation}") + + String backendStreamLoadUri = null + client.execute(requestBuilder.build()).withCloseable { resp -> + resp.withCloseable { + String body = EntityUtils.toString(resp.getEntity()) + def respCode = resp.getStatusLine().getStatusCode() + // should redirect to backend + if (respCode != 307) { + throw new IllegalStateException("Expect frontend stream load response code is 307, " + + "but meet ${respCode}\nbody: ${body}") + } + backendStreamLoadUri = resp.getFirstHeader("location").getValue() + } + } + + requestBuilder.setUri(backendStreamLoadUri) + try{ + client.execute(requestBuilder.build()).withCloseable { resp -> + resp.withCloseable { + String body = EntityUtils.toString(resp.getEntity()) + def respCode = resp.getStatusLine().getStatusCode() + if (respCode != 200) { + throw new IllegalStateException("Expect backend stream load response code is 200, " + + "but meet ${respCode}\nbody: ${body}") + } + } } + } catch (Throwable t) { + log.info("StreamLoad Exception: ", t) } } - } catch (Throwable t) { - log.info("StreamLoad Exception: ", t) } - } - } - String txnId - streamLoad { - table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k1,tmp,v1=substr(tmp,1,20)' - set 'strict_mode', "false" - set 'two_phase_commit', 'true' - file 'concurrency_update2.csv' - time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - if (exception != null) { - throw exception + String txnId + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'k1,tmp,v1=substr(tmp,1,20)' + set 'strict_mode', "false" + set 'two_phase_commit', 'true' + file 'concurrency_update2.csv' + time 10000 // limit inflight 10s + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + txnId = json.TxnId + assertEquals("success", json.Status.toLowerCase()) + } } - log.info("Stream load result: ${result}".toString()) - def json = parseJson(result) - txnId = json.TxnId - assertEquals("success", json.Status.toLowerCase()) - } - } - sql """ alter table ${tableName} modify column v2 varchar(40);""" - wait_for_schema_change() + sql """ alter table ${tableName} modify column v2 varchar(40);""" + wait_for_schema_change() - sql """ alter table ${tableName} drop column v3;""" - wait_for_schema_change() + sql """ alter table ${tableName} drop column v3;""" + wait_for_schema_change() - sql """ alter table ${tableName} add column v6 varchar(50);""" - wait_for_schema_change() + sql """ alter table ${tableName} add column v6 varchar(50);""" + wait_for_schema_change() - sql """ alter table ${tableName} rename column v4 renamed_v4;""" - wait_for_schema_change() + sql """ alter table ${tableName} rename column v4 renamed_v4;""" + wait_for_schema_change() - streamLoad { - table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k1,tmp,v2=substr(tmp,1,40)' - set 'strict_mode', "false" - file 'concurrency_update2.csv' - time 10000 // limit inflight 10s - } + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 
'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'k1,tmp,v2=substr(tmp,1,40)' + set 'strict_mode', "false" + file 'concurrency_update2.csv' + time 10000 // limit inflight 10s + } - qt_sql """ select * from ${tableName} order by k1;""" + qt_sql """ select * from ${tableName} order by k1;""" - do_streamload_2pc(txnId, "commit", tableName) - - qt_sql """ select * from ${tableName} order by k1;""" + do_streamload_2pc(txnId, "commit", tableName) + + qt_sql """ select * from ${tableName} order by k1;""" - sql "drop table if exists ${tableName};" + sql "drop table if exists ${tableName};" + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy index f13eb477b790ce..cacdc5113fd4fa 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy @@ -17,48 +17,59 @@ // under the License. suite("test_primary_key_partial_update_default_value", "p0") { - def tableName = "test_primary_key_partial_update_default_value" - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL DEFAULT "4321" COMMENT "test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - // insert 2 lines - sql """ - insert into ${tableName} values(2, "doris2", 2000, 223, 1) - """ + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - sql """ - insert into ${tableName} values(1, "doris", 1000, 123, 1) - """ + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - // stream load with key not exit before - streamLoad { - table "${tableName}" + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' + def tableName = "test_primary_key_partial_update_default_value" - file 'default.csv' - time 10000 // limit inflight 10s - } + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL DEFAULT "4321" COMMENT "test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values(2, "doris2", 2000, 223, 1) + """ + + sql """ + insert into ${tableName} values(1, "doris", 1000, 123, 1) + """ + + // stream load with key not exit before + streamLoad { + table "${tableName}" - sql "sync" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' - qt_select_default """ - select * from ${tableName} order by id; - """ + file 
'default.csv' + time 10000 // limit inflight 10s + } - // drop drop - sql """ DROP TABLE IF EXISTS ${tableName} """ + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // drop drop + sql """ DROP TABLE IF EXISTS ${tableName} """ + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy index fc413cdcae1d8a..477de59c08c6d2 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy @@ -16,85 +16,100 @@ // under the License. suite('test_partial_update_delete') { - sql 'set enable_nereids_planner=false' - sql "set experimental_enable_nereids_planner=false;" - sql 'set enable_nereids_dml=false' - def tableName1 = "test_partial_update_delete1" - sql "DROP TABLE IF EXISTS ${tableName1};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int NOT NULL, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - def tableName2 = "test_partial_update_delete2" - sql "DROP TABLE IF EXISTS ${tableName2};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( - `k` BIGINT NULL - ) UNIQUE KEY(k) - DISTRIBUTED BY HASH(k) BUCKETS 1 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" - qt_sql "select * from ${tableName1} order by k1;" - sql "insert into ${tableName2} values(1),(2),(3);" - sql "delete from ${tableName1} A using ${tableName2} B where A.k1=B.k;" - qt_sql "select * from ${tableName1} order by k1;" - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - qt_with_delete_sign "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - def tableName3 = "test_partial_update_delete3" - sql "DROP TABLE IF EXISTS ${tableName3};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" - sql "insert into ${tableName3} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" - qt_sql "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" - streamLoad { - table "${tableName3}" + sql 'set enable_nereids_planner=false' + sql "set experimental_enable_nereids_planner=false;" + sql 'set enable_nereids_dml=false' - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 
'k1' - set 'partial_columns', 'true' - set 'merge_type', 'DELETE' + def tableName1 = "test_partial_update_delete1" + sql "DROP TABLE IF EXISTS ${tableName1};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + `c3` int NOT NULL, + `c4` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ - file 'partial_update_delete.csv' - time 10000 - } - sql "sync" - qt_sql "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - qt_sql "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName3} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + def tableName2 = "test_partial_update_delete2" + sql "DROP TABLE IF EXISTS ${tableName2};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( + `k` BIGINT NULL + ) UNIQUE KEY(k) + DISTRIBUTED BY HASH(k) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" + qt_sql "select * from ${tableName1} order by k1;" + sql "insert into ${tableName2} values(1),(2),(3);" + sql "delete from ${tableName1} A using ${tableName2} B where A.k1=B.k;" + qt_sql "select * from ${tableName1} order by k1;" + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + qt_with_delete_sign "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName1};" + sql "drop table if exists ${tableName2};" + + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + def tableName3 = "test_partial_update_delete3" + sql "DROP TABLE IF EXISTS ${tableName3};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + `c3` int, + `c4` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName3} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" + qt_sql "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" + streamLoad { + table "${tableName3}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k1' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + + file 'partial_update_delete.csv' + time 10000 + } + sql "sync" + qt_sql "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + qt_sql "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName3} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName3};" + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.groovy index 
013ca819566e34..a9c389290b7c5f 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign.groovy @@ -16,172 +16,187 @@ // under the License. suite('test_partial_update_delete_sign') { - sql 'set enable_nereids_planner=false' - sql "set experimental_enable_nereids_planner=false;" - sql 'set enable_nereids_dml=false' - - def tableName1 = "test_partial_update_delete_sign1" - sql "DROP TABLE IF EXISTS ${tableName1};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql 'set enable_nereids_planner=false' + sql "set experimental_enable_nereids_planner=false;" + sql 'set enable_nereids_dml=false' + + def tableName1 = "test_partial_update_delete_sign1" + sql "DROP TABLE IF EXISTS ${tableName1};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + `c3` int, + `c4` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" + qt_sql "select * from ${tableName1} order by k1,c1,c2,c3,c4;" + streamLoad { + table "${tableName1}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'k1,__DORIS_DELETE_SIGN__' + + file 'delete_sign.csv' + time 10000 // limit inflight 10s + } + sql "sync" + qt_after_delete "select * from ${tableName1} order by k1,c1,c2,c3,c4;" + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + sql "sync" + // // skip_delete_bitmap=true, skip_delete_sign=true + // qt_1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + + // sql "set skip_delete_sign=true;" + // sql "set skip_delete_bitmap=false;" + // sql "sync" + // // skip_delete_bitmap=false, skip_delete_sign=true + // qt_2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + qt_with_delete_sign "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName1};" + + + // sql "set skip_delete_sign=false;" + // sql "set skip_storage_engine_merge=false;" + // sql "set skip_delete_bitmap=false;" + // sql "sync" + // def tableName2 = "test_partial_update_delete_sign2" + // sql "DROP TABLE IF EXISTS ${tableName2};" + // sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( + // `k1` int NOT NULL, + // `c1` int, + // `c2` int, + // `c3` int, + // `c4` int + // )UNIQUE KEY(k1) + // DISTRIBUTED BY HASH(k1) BUCKETS 1 + // PROPERTIES ( + // "enable_unique_key_merge_on_write" = "true", + // "disable_auto_compaction" = "true", + // "replication_num" = "1", + // 
"function_column.sequence_col" = 'c4' + // );""" + + // sql "insert into ${tableName2} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" + // qt_sql "select * from ${tableName2} order by k1,c1,c2,c3,c4;" + // streamLoad { + // table "${tableName2}" + + // set 'column_separator', ',' + // set 'format', 'csv' + // set 'partial_columns', 'true' + // set 'columns', 'k1,__DORIS_DELETE_SIGN__' + + // file 'delete_sign.csv' + // time 10000 // limit inflight 10s + // } + // sql "sync" + // qt_after_delete "select * from ${tableName2} order by k1,c1,c2,c3,c4;" + + // sql "set skip_delete_sign=true;" + // sql "set skip_storage_engine_merge=true;" + // sql "set skip_delete_bitmap=true;" + // sql "sync" + // // skip_delete_bitmap=true, skip_delete_sign=true + // qt_1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName2} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + + // sql "set skip_delete_sign=true;" + // sql "set skip_delete_bitmap=false;" + // sql "sync" + // // skip_delete_bitmap=false, skip_delete_sign=true + // qt_2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName2} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + // sql "drop table if exists ${tableName2};" + + + // partial update a row that has been deleted by delete sign(table without sequence column) + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + sql "sync" + def tableName3 = "test_partial_update_delete_sign3" + sql "DROP TABLE IF EXISTS ${tableName3};" + sql """ create table ${tableName3} ( + k int, + v1 int, + v2 int + ) ENGINE=OLAP unique key (k) + distributed by hash(k) buckets 1 + properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1" - );""" - - sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" - qt_sql "select * from ${tableName1} order by k1,c1,c2,c3,c4;" - streamLoad { - table "${tableName1}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k1,__DORIS_DELETE_SIGN__' - - file 'delete_sign.csv' - time 10000 // limit inflight 10s - } - sql "sync" - qt_after_delete "select * from ${tableName1} order by k1,c1,c2,c3,c4;" - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - sql "sync" - // skip_delete_bitmap=true, skip_delete_sign=true - qt_1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - - sql "set skip_delete_sign=true;" - sql "set skip_delete_bitmap=false;" - sql "sync" - // skip_delete_bitmap=false, skip_delete_sign=true - qt_2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName1};" - - - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - sql "sync" - def tableName2 = "test_partial_update_delete_sign2" - sql "DROP TABLE IF EXISTS ${tableName2};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName3} values(1,1,1);" + qt_1 "select * from ${tableName3} order by k;" + sql "insert into 
${tableName3}(k,v1,v2,__DORIS_DELETE_SIGN__) values(1,1,1,1);" + qt_2 "select * from ${tableName3} order by k;" + streamLoad { + table "${tableName3}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'k,v1' + + file 'test_partial_update_delete_sign_data.csv' + time 10000 // limit inflight 10s + } + sql "sync" + qt_3 "select * from ${tableName3} order by k;" + sql "drop table if exists ${tableName3};" + + + // partial update a row that has been deleted by delete sign(table with sequence column) + def tableName4 = "test_partial_update_delete_sign4" + sql "DROP TABLE IF EXISTS ${tableName4};" + sql """ create table ${tableName4} ( + k int, + v1 int, + v2 int, + c int + ) ENGINE=OLAP unique key (k) + distributed by hash(k) buckets 1 + properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "replication_num" = "1", - "function_column.sequence_col" = 'c4' - );""" - - sql "insert into ${tableName2} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" - qt_sql "select * from ${tableName2} order by k1,c1,c2,c3,c4;" - streamLoad { - table "${tableName2}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k1,__DORIS_DELETE_SIGN__' - - file 'delete_sign.csv' - time 10000 // limit inflight 10s - } - sql "sync" - qt_after_delete "select * from ${tableName2} order by k1,c1,c2,c3,c4;" - - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - sql "sync" - // skip_delete_bitmap=true, skip_delete_sign=true - qt_1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName2} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - - sql "set skip_delete_sign=true;" - sql "set skip_delete_bitmap=false;" - sql "sync" - // skip_delete_bitmap=false, skip_delete_sign=true - qt_2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName2} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName2};" - - - // partial update a row that has been deleted by delete sign(table without sequence column) - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - sql "sync" - def tableName3 = "test_partial_update_delete_sign3" - sql "DROP TABLE IF EXISTS ${tableName3};" - sql """ create table ${tableName3} ( - k int, - v1 int, - v2 int - ) ENGINE=OLAP unique key (k) - distributed by hash(k) buckets 1 - properties("replication_num" = "1", - "enable_unique_key_merge_on_write" = "true"); """ - sql "insert into ${tableName3} values(1,1,1);" - qt_1 "select * from ${tableName3} order by k;" - sql "insert into ${tableName3}(k,v1,v2,__DORIS_DELETE_SIGN__) values(1,1,1,1);" - qt_2 "select * from ${tableName3} order by k;" - streamLoad { - table "${tableName3}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k,v1' - - file 'test_partial_update_delete_sign_data.csv' - time 10000 // limit inflight 10s - } - sql "sync" - qt_3 "select * from ${tableName3} order by k;" - sql "drop table if exists ${tableName3};" - - - // partial update a row that has been deleted by delete sign(table with sequence column) - def tableName4 = "test_partial_update_delete_sign4" - sql "DROP TABLE IF EXISTS ${tableName4};" - sql """ create table ${tableName4} ( - k int, - v1 int, - v2 int, - c int - ) ENGINE=OLAP unique key (k) - distributed by hash(k) buckets 1 - 
properties("replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "c"); """ - sql "insert into ${tableName4} values(1,1,1,1);" - qt_1 "select * from ${tableName4} order by k;" - sql "insert into ${tableName4}(k,v1,v2,c,__DORIS_DELETE_SIGN__) values(1,1,1,1,1);" - qt_2 "select * from ${tableName4} order by k;" - streamLoad { - table "${tableName4}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'k,v1' - - file 'test_partial_update_delete_sign_data.csv' - time 10000 // limit inflight 10s + "function_column.sequence_col" = "c", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName4} values(1,1,1,1);" + qt_1 "select * from ${tableName4} order by k;" + sql "insert into ${tableName4}(k,v1,v2,c,__DORIS_DELETE_SIGN__) values(1,1,1,1,1);" + qt_2 "select * from ${tableName4} order by k;" + streamLoad { + table "${tableName4}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'k,v1' + + file 'test_partial_update_delete_sign_data.csv' + time 10000 // limit inflight 10s + } + sql "sync" + qt_3 "select * from ${tableName4} order by k;" + sql "drop table if exists ${tableName4};" + } } - sql "sync" - qt_3 "select * from ${tableName4} order by k;" - sql "drop table if exists ${tableName4};" } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy index e93f7fa46be8d7..22a063c6dcc5e8 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy @@ -18,305 +18,322 @@ suite("test_partial_update_insert_light_schema_change", "p0") { - // ===== light schema change ===== - // test add value column - def tableName = "test_partial_update_insert_light_schema_change_add_column" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `c0` int NULL, - `c1` int NULL, - `c2` int NULL, - `c3` int NULL, - `c4` int NULL, - `c5` int NULL, - `c6` int NULL, - `c7` int NULL, - `c8` int NULL, - `c9` int NULL) - UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true") - """ - - sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" - sql "sync" - qt_add_value_col_1 " select * from ${tableName} order by c0 " - - // schema change - sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " - def try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" - - // test insert data without new column - sql "set enable_unique_key_partial_update=true;" - sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - - // check data, new column is filled by default value. 
- qt_add_value_col_2 " select * from ${tableName} order by c0 " - - // test insert data with new column - sql "set enable_unique_key_partial_update=true;" - sql "insert into ${tableName}(c0,c1,c2,c10) values(1,1,1,10);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - - // check data, new column is filled by given value. - qt_add_value_col_3 " select * from ${tableName} order by c0 " - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - // test delete value column - tableName = "test_partial_update_insert_light_schema_change_delete_column" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """CREATE TABLE ${tableName} ( - `c0` int NULL, - `c1` int NULL, - `c2` int NULL, - `c3` int NULL, - `c4` int NULL, - `c5` int NULL, - `c6` int NULL, - `c7` int NULL, - `c8` int NULL, - `c9` int NULL) - UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true")""" - - sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" - sql "sync" - qt_delete_value_col_1 " select * from ${tableName} order by c0 " - - // schema change - sql " ALTER table ${tableName} DROP COLUMN c8 " - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - // test insert data without delete column - sql "set enable_unique_key_partial_update=true;" - test { - sql "insert into ${tableName}(c0,c1,c2,c8) values(1,1,1,10);" - exception "Unknown column 'c8' in 'test_partial_update_insert_light_schema_change_delete_column'" - } - sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - qt_delete_value_col_2 " select * from ${tableName} order by c0 " - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - // test delete sequence col - tableName = "test_partial_update_insert_light_schema_change_delete_seq_col" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """CREATE TABLE ${tableName} ( - `k` int NULL, - `v1` int NULL, - `v2` int NULL, - `c` int NULL) - UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "c");""" - sql "insert into ${tableName} values(1,1,1,1),(2,20,20,20),(1,10,10,10),(2,10,10,10);" - qt_delete_seq_col_1 "select * from ${tableName} order by k;" - - // schema change - test { - sql " ALTER table ${tableName} DROP COLUMN c;" - exception "Can not drop sequence mapping column[c] in Unique data model table[${tableName}]" - } + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - // test update value column - tableName = "test_partial_update_insert_light_schema_change_update_column" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ CREATE TABLE ${tableName} ( - `c0` int NULL, - `c1` int NULL, - `c2` int NULL, - `c3` int NULL, - `c4` int NULL, - `c5` int NULL, - `c6` int NULL, - `c7` int NULL, - `c8` int NULL, - `c9` int NULL) - UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - 
"enable_unique_key_merge_on_write" = "true") """ - - sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" - sql "sync" - qt_update_value_col_1 " select * from ${tableName} order by c0 " - - // schema change - sql " ALTER table ${tableName} MODIFY COLUMN c2 double " - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" - - // test insert data with update column - sql "set enable_unique_key_partial_update=true;" - sql "insert into ${tableName}(c0,c1,c2) values(1,1,1.0);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - qt_update_value_col_2 " select * from ${tableName} order by c0 " - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - // test add key column - tableName = "test_partial_update_insert_light_schema_change_add_key_column" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ CREATE TABLE ${tableName} ( - `c0` int NULL) - UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true")""" - sql "insert into ${tableName} values(1);" - sql "sync" - qt_add_key_col_1 " select * from ${tableName} order by c0; " - - // schema change - sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" - - sql " ALTER table ${tableName} ADD COLUMN c2 int null " - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" - - sql " ALTER table ${tableName} ADD COLUMN c3 int null " - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; - } - assert(try_times>0) - try_times-- - } - sql "sync" - - // test insert data with all key column, should fail because - // it don't have any value columns - sql "set enable_unique_key_partial_update=true;" - test { - sql "insert into ${tableName}(c0,c1) values(1, 1);" - exception "INTERNAL_ERROR" - } - sql "insert into ${tableName}(c0,c1,c2) values(1,0,10);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - qt_add_key_col_2 " select * from ${tableName} order by c0; " - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - // test create index - tableName = "test_partial_update_insert_light_schema_change_create_index" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `c0` int NULL, - `c1` int NULL, - `c2` int NULL, - `c3` int NULL, - `c4` int NULL, - `c5` int NULL, - `c6` int NULL, - `c7` int NULL, - `c8` int NULL, - `c9` int NULL) - UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "light_schema_change" = "true", - "enable_unique_key_merge_on_write" = "true") - """ - - sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" - sql "sync" - 
qt_create_index_1 " select * from ${tableName} order by c0 " - - - sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " - try_times=100 - while(true){ - def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " - Thread.sleep(1200) - if(res[0][9].toString() == "FINISHED"){ - break; + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + // ===== light schema change ===== + // test add value column + def tableName = "test_partial_update_insert_light_schema_change_add_column" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `c0` int NULL, + `c1` int NULL, + `c2` int NULL, + `c3` int NULL, + `c4` int NULL, + `c5` int NULL, + `c6` int NULL, + `c7` int NULL, + `c8` int NULL, + `c9` int NULL) + UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" + sql "sync" + qt_add_value_col_1 " select * from ${tableName} order by c0 " + + // schema change + sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " + def try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + // test insert data without new column + sql "set enable_unique_key_partial_update=true;" + sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + + // check data, new column is filled by default value. + qt_add_value_col_2 " select * from ${tableName} order by c0 " + + // test insert data with new column + sql "set enable_unique_key_partial_update=true;" + sql "insert into ${tableName}(c0,c1,c2,c10) values(1,1,1,10);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + + // check data, new column is filled by given value. 
+ qt_add_value_col_3 " select * from ${tableName} order by c0 " + + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + // test delete value column + tableName = "test_partial_update_insert_light_schema_change_delete_column" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """CREATE TABLE ${tableName} ( + `c0` int NULL, + `c1` int NULL, + `c2` int NULL, + `c3` int NULL, + `c4` int NULL, + `c5` int NULL, + `c6` int NULL, + `c7` int NULL, + `c8` int NULL, + `c9` int NULL) + UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" + sql "sync" + qt_delete_value_col_1 " select * from ${tableName} order by c0 " + + // schema change + sql " ALTER table ${tableName} DROP COLUMN c8 " + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + // test insert data without delete column + sql "set enable_unique_key_partial_update=true;" + test { + sql "insert into ${tableName}(c0,c1,c2,c8) values(1,1,1,10);" + exception "Unknown column 'c8' in 'test_partial_update_insert_light_schema_change_delete_column'" + } + sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + qt_delete_value_col_2 " select * from ${tableName} order by c0 " + + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + // test delete sequence col + tableName = "test_partial_update_insert_light_schema_change_delete_seq_col" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """CREATE TABLE ${tableName} ( + `k` int NULL, + `v1` int NULL, + `v2` int NULL, + `c` int NULL) + UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1,1,1,1),(2,20,20,20),(1,10,10,10),(2,10,10,10);" + qt_delete_seq_col_1 "select * from ${tableName} order by k;" + + // schema change + test { + sql " ALTER table ${tableName} DROP COLUMN c;" + exception "Can not drop sequence mapping column[c] in Unique data model table[${tableName}]" + } + + // test update value column + tableName = "test_partial_update_insert_light_schema_change_update_column" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + `c0` int NULL, + `c1` int NULL, + `c2` int NULL, + `c3` int NULL, + `c4` int NULL, + `c5` int NULL, + `c6` int NULL, + `c7` int NULL, + `c8` int NULL, + `c9` int NULL) + UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" + sql "sync" + qt_update_value_col_1 " select * from ${tableName} order by c0 " + + // schema change + sql " ALTER table ${tableName} MODIFY COLUMN c2 double " + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + 
Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + // test insert data with update column + sql "set enable_unique_key_partial_update=true;" + sql "insert into ${tableName}(c0,c1,c2) values(1,1,1.0);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + qt_update_value_col_2 " select * from ${tableName} order by c0 " + + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + // test add key column + tableName = "test_partial_update_insert_light_schema_change_add_key_column" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + `c0` int NULL) + UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1);" + sql "sync" + qt_add_key_col_1 " select * from ${tableName} order by c0; " + + // schema change + sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + sql " ALTER table ${tableName} ADD COLUMN c2 int null " + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + sql " ALTER table ${tableName} ADD COLUMN c3 int null " + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + assert(try_times>0) + try_times-- + } + sql "sync" + + // test insert data with all key column, should fail because + // it don't have any value columns + sql "set enable_unique_key_partial_update=true;" + test { + sql "insert into ${tableName}(c0,c1) values(1, 1);" + exception "INTERNAL_ERROR" + } + sql "insert into ${tableName}(c0,c1,c2) values(1,0,10);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + qt_add_key_col_2 " select * from ${tableName} order by c0; " + + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + // test create index + tableName = "test_partial_update_insert_light_schema_change_create_index" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `c0` int NULL, + `c1` int NULL, + `c2` int NULL, + `c3` int NULL, + `c4` int NULL, + `c5` int NULL, + `c6` int NULL, + `c7` int NULL, + `c8` int NULL, + `c9` int NULL) + UNIQUE KEY(`c0`) DISTRIBUTED BY HASH(`c0`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "light_schema_change" = "true", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName} values(1, 0, 0, 0, 0, 0, 0, 0, 0, 0);" + sql "sync" + qt_create_index_1 " select * from ${tableName} order by c0 " + + + sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " + try_times=100 + while(true){ + def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " + Thread.sleep(1200) + if(res[0][9].toString() == "FINISHED"){ + break; + } + 
assert(try_times>0) + try_times-- + } + sql "sync" + + //test insert data with create index + sql "set enable_unique_key_partial_update=true;" + sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" + sql "set enable_unique_key_partial_update=false;" + sql "sync" + qt_create_index_2 " select * from ${tableName} order by c0 " + sql """ DROP TABLE IF EXISTS ${tableName} """ } - assert(try_times>0) - try_times-- } - sql "sync" - - //test insert data with create index - sql "set enable_unique_key_partial_update=true;" - sql "insert into ${tableName}(c0,c1,c2) values(1,1,1);" - sql "set enable_unique_key_partial_update=false;" - sql "sync" - qt_create_index_2 " select * from ${tableName} order by c0 " - sql """ DROP TABLE IF EXISTS ${tableName} """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.groovy index 0f80940ec3393c..15dcf9f1bba72e 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_seq_col.groovy @@ -17,98 +17,110 @@ // under the License. suite("test_partial_update_native_insert_seq_col_old_planner", "p0") { - sql "set enable_nereids_dml=false;" - sql "set experimental_enable_nereids_planner=false;" - sql "set enable_fallback_to_original_planner=true;" - sql "sync;" - - def tableName = "test_partial_update_native_insert_seq_col_old_planner" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - ) - """ - sql """ insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01');""" - sql "sync" - - qt_select_default """ select * from ${tableName} order by id;""" - - // don't set partial update header, it's a row update streamload - // the input data don't contains sequence mapping column, will load fail - test { - sql "insert into ${tableName}(id,score) values(2,400),(1,200);" - exception "Table test_partial_update_native_insert_seq_col_old_planner has sequence column, need to specify the sequence column" - } - // set partial update header, should success - // we don't provide the sequence column in input data, so the updated rows - // should use there original sequence column values. 
- sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName}(id,score) values(2,400),(1,200);" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - qt_partial_update_without_seq """ select * from ${tableName} order by id;""" - - // provide the sequence column this time, should update according to the - // given sequence values - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """insert into ${tableName}(id,score,update_time) values - (2,2500,"2023-07-19"), - (2,2600,"2023-07-20"), - (1,1300,"2022-07-19"), - (3,1500,"2022-07-20"), - (3,2500,"2022-07-18");""" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - qt_partial_update_with_seq """ select * from ${tableName} order by id;""" - - sql "SET show_hidden_columns=true" - sql "sync" - - qt_partial_update_with_seq_hidden_columns """select * from ${tableName} order by id;""" - - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "nereids_partial_update_native_insert_seq_col2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `score` int(11) NOT NULL COMMENT "用户得分", - `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - )""" - - // don't set enable_unique_key_partial_update, it's a row update - // the input data don't contains sequence mapping column but the sequence mapping - // column's default value is CURRENT_TIMESTAMP, will load successfully - sql "SET show_hidden_columns=false" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql "insert into ${tableName2}(id,score) values(2,400),(1,200);" - qt_sql """ select id,score from ${tableName2} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName2}; """ + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=false;" + sql "set experimental_enable_nereids_planner=false;" + sql "set enable_fallback_to_original_planner=true;" + sql "sync;" + + def tableName = "test_partial_update_native_insert_seq_col_old_planner" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + + sql """ insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01');""" + sql "sync" + + qt_select_default """ select * from ${tableName} order by id;""" + + // don't set partial update header, it's a row update streamload + // the input data don't contains sequence mapping column, will load fail + 
+            test {
+                sql "insert into ${tableName}(id,score) values(2,400),(1,200);"
+                exception "Table test_partial_update_native_insert_seq_col_old_planner has sequence column, need to specify the sequence column"
+            }
+
+            // enable partial update this time, should succeed;
+            // we don't provide the sequence column in the input data, so the updated rows
+            // should use their original sequence column values.
+            sql "set enable_unique_key_partial_update=true;"
+            sql "sync;"
+            sql "insert into ${tableName}(id,score) values(2,400),(1,200);"
+            sql "set enable_unique_key_partial_update=false;"
+            sql "sync;"
+
+            qt_partial_update_without_seq """ select * from ${tableName} order by id;"""
+
+            // provide the sequence column this time, should update according to the
+            // given sequence values
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict = false;"
+            sql "sync;"
+            sql """insert into ${tableName}(id,score,update_time) values
+                (2,2500,"2023-07-19"),
+                (2,2600,"2023-07-20"),
+                (1,1300,"2022-07-19"),
+                (3,1500,"2022-07-20"),
+                (3,2500,"2022-07-18");"""
+            sql "set enable_unique_key_partial_update=false;"
+            sql "sync;"
+
+            qt_partial_update_with_seq """ select * from ${tableName} order by id;"""
+
+            sql "SET show_hidden_columns=true"
+            sql "sync"
+
+            qt_partial_update_with_seq_hidden_columns """select * from ${tableName} order by id;"""
+
+            sql """ DROP TABLE IF EXISTS ${tableName} """
+
+
+            def tableName2 = "nereids_partial_update_native_insert_seq_col2"
+            sql """ DROP TABLE IF EXISTS ${tableName2} """
+            sql """
+                CREATE TABLE ${tableName2} (
+                    `id` int(11) NOT NULL COMMENT "用户 ID",
+                    `score` int(11) NOT NULL COMMENT "用户得分",
+                    `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP)
+                UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1
+                PROPERTIES(
+                    "replication_num" = "1",
+                    "enable_unique_key_merge_on_write" = "true",
+                    "function_column.sequence_col" = "update_time",
+                    "store_row_column" = "${use_row_store}"); """
+
+            // don't set enable_unique_key_partial_update, so this is a full row update;
+            // the input data doesn't contain the sequence mapping column, but the sequence mapping
+            // column's default value is CURRENT_TIMESTAMP, so it will load successfully
+            sql "SET show_hidden_columns=false"
+            sql "set enable_unique_key_partial_update=false;"
+            sql "sync;"
+            sql "insert into ${tableName2}(id,score) values(2,400),(1,200);"
+            qt_sql """ select id,score from ${tableName2} order by id;"""
+            sql """ DROP TABLE IF EXISTS ${tableName2}; """
+        }
+    }
}
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
index 01e3846ac2b6f2..8f629d730ef149 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
@@ -17,216 +17,235 @@
 // under the License.
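+// NOTE: every case in this suite now runs twice, once with "store_row_column" = "false" and once with "true"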
suite("test_partial_update_native_insert_stmt", "p0") { - sql "set enable_nereids_dml=false;" - sql "set experimental_enable_nereids_planner=false;" - sql "set enable_fallback_to_original_planner=true;" - sql "sync;" - - // sql 'set enable_fallback_to_original_planner=false' - def tableName = "test_partial_update_native_insert_stmt" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1)""" - qt_1 """ select * from ${tableName} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update using insert stmt in non-strict mode, - // existing rows should be updated and new rows should be inserted with unmentioned columns filled with default or null value - sql """insert into ${tableName}(id,score) values(2,400),(1,200),(4,400)""" - qt_1 """ select * from ${tableName} order by id; """ - test { - sql """insert into ${tableName} values(2,400),(1,200),(4,400)""" - exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." - } - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "test_partial_update_native_insert_stmt2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - )""" - sql """ insert into ${tableName2} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01');""" - qt_2 "select * from ${tableName2} order by id;" - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update with seq col - sql """ insert into ${tableName2}(id,score,update_time) values - (2,2500,"2023-07-19"), - (2,2600,"2023-07-20"), - (1,1300,"2022-07-19"), - (3,1500,"2022-07-20"), - (3,2500,"2022-07-18"); """ - qt_2 "select * from ${tableName2} order by id;" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName2}; """ - - - def tableName3 = "test_partial_update_native_insert_stmt3" - sql """ DROP TABLE IF EXISTS ${tableName3}; """ - sql """ - CREATE TABLE ${tableName3} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName3} values(2, "doris2", 2000, 223, 1),(1, 
"doris", 1000, 123, 1);""" - qt_3 """ select * from ${tableName3} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // in partial update, the unmentioned columns should have default values or be nullable - // but field `name` is not nullable and doesn't have default value - test { - sql """insert into ${tableName3}(id,score) values(2,400),(1,200),(4,400)""" - exception "INTERNAL_ERROR" - } - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - qt_3 """ select * from ${tableName3} order by id; """ - sql """ DROP TABLE IF EXISTS ${tableName3} """ - - - def tableName4 = "test_partial_update_native_insert_stmt4" - sql """ DROP TABLE IF EXISTS ${tableName4} """ - sql """ - CREATE TABLE ${tableName4} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", - `score` int(11) NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - sql """insert into ${tableName4} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1),(3,"doris3",5000,34,345);""" - qt_4 """ select * from ${tableName4} order by id; """ - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - // partial update with delete sign - sql "insert into ${tableName4}(id,__DORIS_DELETE_SIGN__) values(2,1);" - qt_4 """ select * from ${tableName4} order by id; """ - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName4} """ - - - def tableName5 = "test_partial_update_native_insert_stmt5" - sql """ DROP TABLE IF EXISTS ${tableName5} """ - sql """ - CREATE TABLE ${tableName5} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS AUTO PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName5} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_5 """select * from ${tableName5} order by id;""" - sql "set enable_insert_strict = true;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - // partial update using insert stmt in strict mode, the max_filter_ratio is always 0 - test { - sql """ insert into ${tableName5}(id,balance,last_access_time) values(1,500,"2023-07-03 12:00:01"),(3,23,"2023-07-03 12:00:02"),(18,9999999,"2023-07-03 12:00:03"); """ - exception "Insert has filtered data in strict mode" - } - qt_5 """select * from ${tableName5} order by id;""" - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName5}; """ - - def tableName6 = "test_partial_update_native_insert_stmt6" - sql """ DROP TABLE IF EXISTS ${tableName6} """ - sql """create table ${tableName6} ( - k int null, - v int null, - v2 int null, - v3 int null - ) unique key (k) distributed by hash(k) buckets 1 - properties("replication_num" = "1", - 
"enable_unique_key_merge_on_write"="true", - "disable_auto_compaction"="true"); """ - sql "insert into ${tableName6} values(1,1,3,4),(2,2,4,5),(3,3,2,3),(4,4,1,2);" - qt_6 "select * from ${tableName6} order by k;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName6}(k,v) select v2,v3 from ${tableName6};" - qt_6 "select * from ${tableName6} order by k;" - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ DROP TABLE IF EXISTS ${tableName6}; """ - - def tableName7 = "test_partial_update_native_insert_stmt7" - sql """ DROP TABLE IF EXISTS ${tableName7} """ - sql """create table ${tableName7} ( - k1 int null, - k2 int null, - k3 int null, - v1 int null, - v2 int null - ) unique key (k1,k2,k3) distributed by hash(k1,k2) buckets 4 - properties("replication_num" = "1", - "enable_unique_key_merge_on_write"="true", - "disable_auto_compaction"="true"); """ - sql "insert into ${tableName7} values(1,1,1,3,4),(2,2,2,4,5),(3,3,3,2,3),(4,4,4,1,2);" - qt_7 "select * from ${tableName7} order by k1;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - test { - sql "insert into ${tableName7}(k1,k2,v2) select k2,k3,v1 from ${tableName7};" - exception "Partial update should include all key columns, missing: k3" - } - qt_7 "select * from ${tableName7} order by k1;" - sql """ DROP TABLE IF EXISTS ${tableName7}; """ - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = true;" - sql "set experimental_enable_nereids_planner=true;" - sql "sync;" + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=false;" + sql "set experimental_enable_nereids_planner=false;" + sql "set enable_fallback_to_original_planner=true;" + sql "sync;" + + // sql 'set enable_fallback_to_original_planner=false' + def tableName = "test_partial_update_native_insert_stmt" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true","store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1)""" + qt_1 """ select * from ${tableName} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // partial update using insert stmt in non-strict mode, + // existing rows should be updated and new rows should be inserted with unmentioned columns filled with default or null value + sql """insert into ${tableName}(id,score) values(2,400),(1,200),(4,400)""" + qt_1 """ select * from ${tableName} order by id; """ + test { + sql """insert into ${tableName} values(2,400),(1,200),(4,400)""" + exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." 
+ } + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + def tableName2 = "test_partial_update_native_insert_stmt2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + + sql """ insert into ${tableName2} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01');""" + qt_2 "select * from ${tableName2} order by id;" + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // partial update with seq col + sql """ insert into ${tableName2}(id,score,update_time) values + (2,2500,"2023-07-19"), + (2,2600,"2023-07-20"), + (1,1300,"2022-07-19"), + (3,1500,"2022-07-20"), + (3,2500,"2022-07-18"); """ + qt_2 "select * from ${tableName2} order by id;" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName2}; """ + + + def tableName3 = "test_partial_update_native_insert_stmt3" + sql """ DROP TABLE IF EXISTS ${tableName3}; """ + sql """ + CREATE TABLE ${tableName3} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName3} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1);""" + qt_3 """ select * from ${tableName3} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + // in partial update, the unmentioned columns should have default values or be nullable + // but field `name` is not nullable and doesn't have default value + test { + sql """insert into ${tableName3}(id,score) values(2,400),(1,200),(4,400)""" + exception "INTERNAL_ERROR" + } + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + qt_3 """ select * from ${tableName3} order by id; """ + sql """ DROP TABLE IF EXISTS ${tableName3} """ + + + def tableName4 = "test_partial_update_native_insert_stmt4" + sql """ DROP TABLE IF EXISTS ${tableName4} """ + sql """ + CREATE TABLE ${tableName4} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL DEFAULT "yixiu" COMMENT "用户姓名", + `score` int(11) NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName4} values(2, "doris2", 2000, 223, 1),(1, "doris", 1000, 123, 1),(3,"doris3",5000,34,345);""" + qt_4 """ select * from ${tableName4} order by id; """ + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql 
"sync;" + // partial update with delete sign + sql "insert into ${tableName4}(id,__DORIS_DELETE_SIGN__) values(2,1);" + qt_4 """ select * from ${tableName4} order by id; """ + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName4} """ + + + def tableName5 = "test_partial_update_native_insert_stmt5" + sql """ DROP TABLE IF EXISTS ${tableName5} """ + sql """ + CREATE TABLE ${tableName5} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS AUTO PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + + sql """insert into ${tableName5} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_5 """select * from ${tableName5} order by id;""" + sql "set enable_insert_strict = true;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + // partial update using insert stmt in strict mode, the max_filter_ratio is always 0 + test { + sql """ insert into ${tableName5}(id,balance,last_access_time) values(1,500,"2023-07-03 12:00:01"),(3,23,"2023-07-03 12:00:02"),(18,9999999,"2023-07-03 12:00:03"); """ + exception "Insert has filtered data in strict mode" + } + qt_5 """select * from ${tableName5} order by id;""" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName5}; """ + + def tableName6 = "test_partial_update_native_insert_stmt6" + sql """ DROP TABLE IF EXISTS ${tableName6} """ + sql """create table ${tableName6} ( + k int null, + v int null, + v2 int null, + v3 int null + ) unique key (k) distributed by hash(k) buckets 1 + properties("replication_num" = "1", + "enable_unique_key_merge_on_write"="true", + "disable_auto_compaction"="true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName6} values(1,1,3,4),(2,2,4,5),(3,3,2,3),(4,4,1,2);" + qt_6 "select * from ${tableName6} order by k;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + sql "insert into ${tableName6}(k,v) select v2,v3 from ${tableName6};" + qt_6 "select * from ${tableName6} order by k;" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ DROP TABLE IF EXISTS ${tableName6}; """ + + def tableName7 = "test_partial_update_native_insert_stmt7" + sql """ DROP TABLE IF EXISTS ${tableName7} """ + sql """create table ${tableName7} ( + k1 int null, + k2 int null, + k3 int null, + v1 int null, + v2 int null + ) unique key (k1,k2,k3) distributed by hash(k1,k2) buckets 4 + properties("replication_num" = "1", + "enable_unique_key_merge_on_write"="true", + "disable_auto_compaction"="true", + "store_row_column" = "${use_row_store}"); """ + + sql "insert into ${tableName7} values(1,1,1,3,4),(2,2,2,4,5),(3,3,3,2,3),(4,4,4,1,2);" + qt_7 "select * from ${tableName7} order by k1;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + test { + sql "insert into ${tableName7}(k1,k2,v2) select k2,k3,v1 from ${tableName7};" + exception "Partial update should include all key 
columns, missing: k3" + } + qt_7 "select * from ${tableName7} order by k1;" + sql """ DROP TABLE IF EXISTS ${tableName7}; """ + + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "set experimental_enable_nereids_planner=true;" + sql "sync;" + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.groovy index 8dac6e9120e207..62dfed2fa70f63 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt_complex.groovy @@ -16,107 +16,119 @@ // under the License. suite("test_partial_update_native_insert_stmt_complex", "p0") { - sql "set enable_nereids_dml=false;" - sql "set experimental_enable_nereids_planner=false;" - sql "set enable_fallback_to_original_planner=true;" - sql "sync;" - - // test complex partial update - def tbName1 = "test_partial_update_native_insert_stmt_complex1" - def tbName2 = "test_partial_update_native_insert_stmt_complex2" - def tbName3 = "test_partial_update_native_insert_stmt_complex3" - - sql "DROP TABLE IF EXISTS ${tbName1}" - sql "DROP TABLE IF EXISTS ${tbName2}" - sql "DROP TABLE IF EXISTS ${tbName3}" - - sql """create table ${tbName1} ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date) unique key (id) distributed by hash(id) - properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true');""" - - sql """create table ${tbName2} ( - id int, - c1 bigint, - c2 string, - c3 double, - c4 date) unique key (id) distributed by hash(id) - properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true');""" - - sql """create table ${tbName3} (id int) distributed by hash (id) properties('replication_num'='1');""" - - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """insert into ${tbName1} values - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03');""" - sql """insert into ${tbName2} values - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05');""" - sql """insert into ${tbName3} values(1), (3), (5);""" - - qt_tbl1 "select * from ${tbName1} order by id;" - qt_tbl2 "select * from ${tbName2} order by id;" - qt_tbl3 "select * from ${tbName3} order by id;" - - qt_select_result """select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;""" - - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """insert into ${tbName1}(id, c1, c3) - select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ - - qt_complex_update """select * from ${tbName1} order by id;""" - test { - sql """insert into ${tbName1} - select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 - from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ - exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." 
+ + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + sql "set enable_nereids_dml=false;" + sql "set experimental_enable_nereids_planner=false;" + sql "set enable_fallback_to_original_planner=true;" + sql "sync;" + + // test complex partial update + def tbName1 = "test_partial_update_native_insert_stmt_complex1" + def tbName2 = "test_partial_update_native_insert_stmt_complex2" + def tbName3 = "test_partial_update_native_insert_stmt_complex3" + + sql "DROP TABLE IF EXISTS ${tbName1}" + sql "DROP TABLE IF EXISTS ${tbName2}" + sql "DROP TABLE IF EXISTS ${tbName3}" + + sql """create table ${tbName1} ( + id int, + c1 bigint, + c2 string, + c3 double, + c4 date) unique key (id) distributed by hash(id) + properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true',"store_row_column" = "${use_row_store}"); """ + + sql """create table ${tbName2} ( + id int, + c1 bigint, + c2 string, + c3 double, + c4 date) unique key (id) distributed by hash(id) + properties('replication_num'='1', 'enable_unique_key_merge_on_write' = 'true',"store_row_column" = "${use_row_store}"); """ + + sql """create table ${tbName3} (id int) distributed by hash (id) properties('replication_num'='1');""" + + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """insert into ${tbName1} values + (1, 1, '1', 1.0, '2000-01-01'), + (2, 2, '2', 2.0, '2000-01-02'), + (3, 3, '3', 3.0, '2000-01-03');""" + sql """insert into ${tbName2} values + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05');""" + sql """insert into ${tbName3} values(1), (3), (5);""" + + qt_tbl1 "select * from ${tbName1} order by id;" + qt_tbl2 "select * from ${tbName2} order by id;" + qt_tbl3 "select * from ${tbName3} order by id;" + + qt_select_result """select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;""" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """insert into ${tbName1}(id, c1, c3) + select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ + + qt_complex_update """select * from ${tbName1} order by id;""" + test { + sql """insert into ${tbName1} + select ${tbName2}.id, ${tbName2}.c1, ${tbName2}.c3 * 100 + from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id; """ + exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement." 
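+                // the same rule applies when the rows come from an INSERT ... SELECT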
+ } + sql "truncate table ${tbName1};" + sql "truncate table ${tbName2};" + sql "truncate table ${tbName3};" + + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + sql """insert into ${tbName1} values + (1, 1, '1', 1.0, '2000-01-01'), + (2, 2, '2', 2.0, '2000-01-02'), + (3, 3, '3', 3.0, '2000-01-03');""" + sql """insert into ${tbName2} values + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05');""" + sql """insert into ${tbName3} values(1), (3), (5);""" + + qt_select_result "select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict = false;" + sql "sync;" + sql """ insert into ${tbName1}(id, __DORIS_DELETE_SIGN__) + select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id;""" + + qt_complex_delete """select * from ${tbName1} order by id;""" + + sql "DROP TABLE IF EXISTS ${tbName1}" + sql "DROP TABLE IF EXISTS ${tbName2}" + sql "DROP TABLE IF EXISTS ${tbName3}" + + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict = false;" + sql "set experimental_enable_nereids_planner=true;" + sql "sync;" + } } - sql "truncate table ${tbName1};" - sql "truncate table ${tbName2};" - sql "truncate table ${tbName3};" - - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - sql """insert into ${tbName1} values - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03');""" - sql """insert into ${tbName2} values - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05');""" - sql """insert into ${tbName3} values(1), (3), (5);""" - - qt_select_result "select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id order by ${tbName2}.id;" - - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ insert into ${tbName1}(id, __DORIS_DELETE_SIGN__) - select ${tbName2}.id,1 from ${tbName2} inner join ${tbName3} on ${tbName2}.id = ${tbName3}.id;""" - - qt_complex_delete """select * from ${tbName1} order by id;""" - - sql "DROP TABLE IF EXISTS ${tbName1}" - sql "DROP TABLE IF EXISTS ${tbName2}" - sql "DROP TABLE IF EXISTS ${tbName3}" - - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict = true;" - sql "set experimental_enable_nereids_planner=true;" - sql "sync;" } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col.groovy index 4a99f2a770021d..3a91e5a27fb136 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col.groovy @@ -17,142 +17,152 @@ // under the License. 
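+// NOTE: as in the suites above, every case here runs once per store_row_column setting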
suite("test_primary_key_partial_update_seq_col", "p0") { - def tableName = "test_primary_key_partial_update_seq_col" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - ) - """ - // insert 2 lines - sql """ - insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01') - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // don't set partial update header, it's a row update streamload - // the input data don't contains sequence mapping column, will load fail - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'id,score' - - file 'basic.csv' - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - if (exception != null) { - throw exception + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_seq_col" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01') + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // don't set partial update header, it's a row update streamload + // the input data don't contains sequence mapping column, will load fail + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score' + + file 'basic.csv' + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + assertTrue(json.Message.contains('need to specify the sequence column')) + } } - log.info("Stream load result: ${result}".toString()) - def json = parseJson(result) - assertEquals("fail", json.Status.toLowerCase()) - assertTrue(json.Message.contains('need to specify the sequence column')) - } - } - // set partial update header, should success - // we don't provide the sequence column in input data, so the 
updated rows - // should use there original sequence column values. - streamLoad { - table "${tableName}" + // set partial update header, should success + // we don't provide the sequence column in input data, so the updated rows + // should use there original sequence column values. + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' - file 'basic.csv' - time 10000 // limit inflight 10s - } + file 'basic.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_partial_update_without_seq """ - select * from ${tableName} order by id; - """ + qt_partial_update_without_seq """ + select * from ${tableName} order by id; + """ - // provide the sequence column this time, should update according to the - // given sequence values - streamLoad { - table "${tableName}" + // provide the sequence column this time, should update according to the + // given sequence values + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,update_time' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,update_time' - file 'basic_with_seq.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_seq.csv' + time 10000 // limit inflight 10s + } - sql "sync" - - qt_partial_update_with_seq """ - select * from ${tableName} order by id; - """ - - sql "SET show_hidden_columns=true" - - sql "sync" - - qt_partial_update_with_seq_hidden_columns """ - select * from ${tableName} order by id; - """ - - // drop drop - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "nereids_partial_update_native_insert_seq_col2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `score` int(11) NOT NULL COMMENT "用户得分", - `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - )""" - - // don't set partial update header, it's a row update streamload - // the input data don't contains sequence mapping column but the sequence mapping - // column's default value is CURRENT_TIMESTAMP, will load successfully - streamLoad { - table "${tableName2}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'id,score' - - file 'basic.csv' - time 10000 // limit inflight 10s + sql "sync" + + qt_partial_update_with_seq """ + select * from ${tableName} order by id; + """ + + sql "SET show_hidden_columns=true" + + sql "sync" + + qt_partial_update_with_seq_hidden_columns """ + select * from ${tableName} order by id; + """ + + // drop drop + sql """ DROP TABLE IF EXISTS ${tableName} """ + + + def tableName2 = "nereids_partial_update_native_insert_seq_col2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `score` int(11) NOT NULL COMMENT "用户得分", + `update_time` DATETIMEV2 NULL DEFAULT CURRENT_TIMESTAMP) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + 
"store_row_column" = "${use_row_store}"); """ + // don't set partial update header, it's a row update streamload + // the input data don't contains sequence mapping column but the sequence mapping + // column's default value is CURRENT_TIMESTAMP, will load successfully + streamLoad { + table "${tableName2}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score' + + file 'basic.csv' + time 10000 // limit inflight 10s + } + qt_sql """ select id,score from ${tableName2} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName2}; """ + } } - qt_sql """ select id,score from ${tableName2} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName2}; """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.groovy index 79b7ca783166b2..c6cfdba1106b6f 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.groovy @@ -17,89 +17,100 @@ // under the License. suite("test_primary_key_partial_update_seq_col_delete", "p0") { - def tableName = "test_primary_key_partial_update_seq_col_delete" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_col" = "update_time" - ) - """ - // insert 2 lines - sql """ - insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01') - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // set partial update header, should success - // we don't provide the sequence column in input data, so the updated rows - // should use there original sequence column values. 
- streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score' - set 'merge_type', 'DELETE' - - file 'basic.csv' - time 10000 // limit inflight 10s - } - - sql "sync" - - qt_partial_update_without_seq """ - select * from ${tableName} order by id; - """ - // provide the sequence column this time, should update according to the - // given sequence values - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,update_time' - set 'merge_type', 'DELETE' - - file 'basic_with_seq.csv' - time 10000 // limit inflight 10s + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_seq_col_delete" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "update_time", + "store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01') + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // set partial update header, should succeed + // we don't provide the sequence column in input data, so the updated rows + // should use their original sequence column values.
+ streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score' + set 'merge_type', 'DELETE' + + file 'basic.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_without_seq """ + select * from ${tableName} order by id; + """ + + // provide the sequence column this time, should update according to the + // given sequence values + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,update_time' + set 'merge_type', 'DELETE' + + file 'basic_with_seq.csv' + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_partial_update_with_seq """ + select * from ${tableName} order by id; + """ + + sql "SET show_hidden_columns=true" + + sql "sync" + + qt_partial_update_with_seq_hidden_columns """ + select * from ${tableName} order by id; + """ + + // drop table + sql """ DROP TABLE IF EXISTS ${tableName} """ + } } - - sql "sync" - - qt_partial_update_with_seq """ - select * from ${tableName} order by id; - """ - - sql "SET show_hidden_columns=true" - - sql "sync" - - qt_partial_update_with_seq_hidden_columns """ - select * from ${tableName} order by id; - """ - - // drop drop - sql """ DROP TABLE IF EXISTS ${tableName} """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type.groovy index 1ca7079c974fa0..57eaf7c7f79cfd 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type.groovy @@ -17,161 +17,172 @@ // under the License.
suite("test_primary_key_partial_update_seq_type", "p0") { - def tableName = "test_primary_key_partial_update_seq_type" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_type" = "int" - ) - """ - // insert 2 lines - sql """ - insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01') - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // no sequence column header, stream load should fail - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - if (exception != null) { - throw exception + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_seq_type" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_type" = "int", + "store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01') + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // no sequence column header, stream load should fail + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + assertTrue(json.Message.contains('need to specify the sequence column')) + } } - log.info("Stream load result: ${result}".toString()) - def json = parseJson(result) - assertEquals("fail", json.Status.toLowerCase()) - assertTrue(json.Message.contains('need to specify the sequence column')) - } - } - sql "sync" + sql "sync" - // both partial_columns and sequence column header, stream load should success - streamLoad { - table "${tableName}" + // both partial_columns and 
sequence column header, stream load should succeed + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'score' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'score' - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_partial_update_with_seq_score """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_score """ + select * from ${tableName} order by id; + """ - sql "SET show_hidden_columns=true" + sql "SET show_hidden_columns=true" - sql "sync" + sql "sync" - qt_partial_update_with_seq_score_hidden """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_score_hidden """ + select * from ${tableName} order by id; + """ - // use test as sequence column - streamLoad { - table "${tableName}" + // use test as sequence column + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'test' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'test' - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + } - sql "SET show_hidden_columns=false" + sql "SET show_hidden_columns=false" - sql "sync" + sql "sync" - qt_partial_update_with_seq_test """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_test """ + select * from ${tableName} order by id; + """ - sql "SET show_hidden_columns=true" + sql "SET show_hidden_columns=true" - sql "sync" + sql "sync" - qt_partial_update_with_seq_test_hidden """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_test_hidden """ + select * from ${tableName} order by id; + """ - // no partial update header, stream load should success, - // but the missing columns will be filled with default values - streamLoad { - table "${tableName}" + // no partial update header, stream load should succeed, + // but the missing columns will be filled with default values + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'score' + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'score' - file 'basic_with_test2.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test2.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_select_no_partial_update_score """ - select * from ${tableName} order by id; - """ + qt_select_no_partial_update_score """ + select * from ${tableName} order by id; + """ - // no partial update header, stream load should success, - // but the missing columns will be filled with default values - streamLoad { - table "${tableName}" + // no partial update header, stream load should succeed, + // but the missing columns will be filled with default values + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set
'format', 'csv' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'test' + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'test' - file 'basic_with_test2.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test2.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_select_no_partial_update_test """ - select * from ${tableName} order by id; - """ + qt_select_no_partial_update_test """ + select * from ${tableName} order by id; + """ - // drop table - sql """ DROP TABLE IF EXISTS ${tableName} """ + // drop table + sql """ DROP TABLE IF EXISTS ${tableName} """ + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.groovy index 90985616bb5a72..23ef1a5dfbbef6 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.groovy @@ -17,166 +17,177 @@ // under the License. suite("test_primary_key_partial_update_seq_type_delete", "p0") { - def tableName = "test_primary_key_partial_update_seq_type_delete" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321", - `update_time` date NULL) - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES( - "replication_num" = "1", - "enable_unique_key_merge_on_write" = "true", - "function_column.sequence_type" = "int" - ) - """ - // insert 2 lines - sql """ - insert into ${tableName} values - (2, "doris2", 2000, 223, 1, '2023-01-01'), - (1, "doris", 1000, 123, 1, '2023-01-01') - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // no sequence column header, stream load should fail - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - set 'merge_type', 'DELETE' - - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - if (exception != null) { - throw exception + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_seq_type_delete" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) DEFAULT "unknown" COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321", + `update_time` date NULL) + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_type" = "int", + 
"store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values + (2, "doris2", 2000, 223, 1, '2023-01-01'), + (1, "doris", 1000, 123, 1, '2023-01-01') + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // no sequence column header, stream load should fail + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + set 'merge_type', 'DELETE' + + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + assertTrue(json.Message.contains('need to specify the sequence column')) + } } - log.info("Stream load result: ${result}".toString()) - def json = parseJson(result) - assertEquals("fail", json.Status.toLowerCase()) - assertTrue(json.Message.contains('need to specify the sequence column')) - } - } - sql "sync" + sql "sync" - // both partial_columns and sequence column header, stream load should success - streamLoad { - table "${tableName}" + // both partial_columns and sequence column header, stream load should success + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'score' - set 'merge_type', 'DELETE' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'score' + set 'merge_type', 'DELETE' - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_partial_update_with_seq_score """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_score """ + select * from ${tableName} order by id; + """ - sql "SET show_hidden_columns=true" + sql "SET show_hidden_columns=true" - sql "sync" + sql "sync" - qt_partial_update_with_seq_score_hidden """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_score_hidden """ + select * from ${tableName} order by id; + """ - // use test as sequence column - streamLoad { - table "${tableName}" + // use test as sequence column + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'test' - set 'merge_type', 'DELETE' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'test' + set 'merge_type', 'DELETE' - file 'basic_with_test.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test.csv' + time 10000 // limit inflight 10s + } - sql "SET show_hidden_columns=false" + sql "SET show_hidden_columns=false" - sql "sync" + sql "sync" - qt_partial_update_with_seq_test """ - select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_test """ + select * from ${tableName} order by id; + """ - sql "SET show_hidden_columns=true" + sql "SET show_hidden_columns=true" - sql "sync" + sql "sync" - qt_partial_update_with_seq_test_hidden """ - 
select * from ${tableName} order by id; - """ + qt_partial_update_with_seq_test_hidden """ + select * from ${tableName} order by id; + """ - // no partial update header, stream load should success, - // but the missing columns will be filled with default values - streamLoad { - table "${tableName}" + // no partial update header, stream load should succeed, + // but the missing columns will be filled with default values + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'score' - set 'merge_type', 'DELETE' + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'score' + set 'merge_type', 'DELETE' - file 'basic_with_test2.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test2.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_select_no_partial_update_score """ - select * from ${tableName} order by id; - """ + qt_select_no_partial_update_score """ + select * from ${tableName} order by id; + """ - // no partial update header, stream load should success, - // but the missing columns will be filled with default values - streamLoad { - table "${tableName}" + // no partial update header, stream load should succeed, + // but the missing columns will be filled with default values + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'id,score,test,update_time' - set 'function_column.sequence_col', 'test' - set 'merge_type', 'DELETE' + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'id,score,test,update_time' + set 'function_column.sequence_col', 'test' + set 'merge_type', 'DELETE' - file 'basic_with_test2.csv' - time 10000 // limit inflight 10s - } + file 'basic_with_test2.csv' + time 10000 // limit inflight 10s + } - sql "sync" + sql "sync" - qt_select_no_partial_update_test """ - select * from ${tableName} order by id; - """ + qt_select_no_partial_update_test """ + select * from ${tableName} order by id; + """ - // drop table - sql """ DROP TABLE IF EXISTS ${tableName} """ + // drop table + sql """ DROP TABLE IF EXISTS ${tableName} """ + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.groovy index 3b7a875f2c39e0..726e04c684e569 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_strict_mode.groovy @@ -18,198 +18,207 @@ suite("test_partial_update_strict_mode", "p0") { - def tableName = "test_partial_update_strict_mode" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS 4 PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName}
values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_sql """select * from ${tableName} order by id;""" - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 'strict_mode', 'true' - set 'max_filter_ratio', '1' - - file 'upsert.csv' - time 10000 // limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Success", json.Status) - assertEquals(3, json.NumberTotalRows) - assertEquals(1, json.NumberLoadedRows) - assertEquals(2, json.NumberFilteredRows) + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_partial_update_strict_mode" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS 4 PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_sql """select * from ${tableName} order by id;""" + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'true' + set 'max_filter_ratio', '1' + + file 'upsert.csv' + time 10000 // limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Success", json.Status) + assertEquals(3, json.NumberTotalRows) + assertEquals(1, json.NumberLoadedRows) + assertEquals(2, json.NumberFilteredRows) + } + } + sql "sync" + qt_sql """select * from ${tableName} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName}; """ + + + def tableName2 = "test_partial_update_strict_mode2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS 4 PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName2} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_sql """select * from ${tableName2} order by id;""" + streamLoad { + table "${tableName2}" 
+ + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'true' + set 'max_filter_ratio', '0.5' + + file 'upsert.csv' + time 10000 // limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Fail", json.Status) + assertTrue(json.Message.contains("[INTERNAL_ERROR]too many filtered rows")) + assertEquals(3, json.NumberTotalRows) + assertEquals(1, json.NumberLoadedRows) + assertEquals(2, json.NumberFilteredRows) + } + } + sql "sync" + qt_sql """select * from ${tableName2} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName2}; """ + + def tableName3 = "test_partial_update_strict_mode3"; + sql """ DROP TABLE IF EXISTS ${tableName3} """ + sql """ + CREATE TABLE ${tableName3} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS 4 PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName3} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_sql """select * from ${tableName3} order by id;""" + streamLoad { + table "${tableName3}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'false' + set 'max_filter_ratio', '0.5' + + file 'upsert_invalid.csv' + time 10000 // limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Fail", json.Status) + assertTrue(json.Message.contains("[INTERNAL_ERROR]too many filtered rows")) + assertEquals(3, json.NumberTotalRows) + assertEquals(1, json.NumberLoadedRows) + assertEquals(2, json.NumberFilteredRows) + } + } + sql "sync" + qt_sql """select * from ${tableName3} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName3}; """ + + // all columns valid, partial columns do not exist in file + def tableName4 = "test_partial_update_strict_mode4" + sql """ DROP TABLE IF EXISTS ${tableName4} """ + sql """ + CREATE TABLE ${tableName4} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS 4 PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName4} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + sql """insert into ${tableName4} values(3,"steve",23,"beijing",500,"2023-07-03 12:00:02");""" + qt_sql """select * from ${tableName4} order by id;""" + streamLoad { + table tableName4 + set 'column_separator', ',' + set 'format', 
'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'true' + + file 'upsert_missing_par_cols.csv' + time 10000 // limit inflight 10s + + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("Fail", json.Status) + } + } + + sql "sync" + qt_sql """select * from ${tableName4} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName4}; """ } } - sql "sync" - qt_sql """select * from ${tableName} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName} """ - - - def tableName2 = "test_partial_update_strict_mode2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS 4 PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName2} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_sql """select * from ${tableName2} order by id;""" - streamLoad { - table "${tableName2}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 'strict_mode', 'true' - set 'max_filter_ratio', '0.5' - - file 'upsert.csv' - time 10000 // limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Fail", json.Status) - assertTrue(json.Message.contains("[INTERNAL_ERROR]too many filtered rows")) - assertEquals(3, json.NumberTotalRows) - assertEquals(1, json.NumberLoadedRows) - assertEquals(2, json.NumberFilteredRows) - } - } - sql "sync" - qt_sql """select * from ${tableName2} order by id;""" - - def tableName3 = "test_partial_update_strict_mode3"; - sql """ DROP TABLE IF EXISTS ${tableName3} """ - sql """ - CREATE TABLE ${tableName3} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS 4 PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName3} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_sql """select * from ${tableName3} order by id;""" - streamLoad { - table "${tableName3}" - - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 'strict_mode', 'false' - set 'max_filter_ratio', '0.5' - - file 'upsert_invalid.csv' - time 10000 // limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Fail", json.Status) - assertTrue(json.Message.contains("[INTERNAL_ERROR]too many 
filtered rows")) - assertEquals(3, json.NumberTotalRows) - assertEquals(1, json.NumberLoadedRows) - assertEquals(2, json.NumberFilteredRows) - } - } - sql "sync" - qt_sql """select * from ${tableName3} order by id;""" - - // all columns valid, partial columns do not exist in file - def tableName4 = "test_partial_update_strict_mode4" - sql """ DROP TABLE IF EXISTS ${tableName4} """ - sql """ - CREATE TABLE ${tableName4} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS 4 PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName4} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - sql """insert into ${tableName4} values(3,"steve",23,"beijing",500,"2023-07-03 12:00:02");""" - qt_sql """select * from ${tableName4} order by id;""" - streamLoad { - table tableName4 - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 'strict_mode', 'true' - - file 'upsert_missing_par_cols.csv' - time 10000 // limit inflight 10s - - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("Fail", json.Status) - } - } - - sql "sync" - qt_sql """select * from ${tableName4} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName4}; """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_upsert.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_upsert.groovy index 3c02ee3bb6879f..5e4a02f5407904 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_upsert.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_upsert.groovy @@ -18,87 +18,96 @@ suite("test_partial_update_upsert", "p0") { - def tableName = "test_partial_update_upsert1" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS AUTO PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_sql """select * from ${tableName} order by id;""" - streamLoad { - table "${tableName}" + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 'strict_mode', 'false' + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") - file 
'upsert.csv' - time 10000 // limit inflight 10s - } - sql "sync" - qt_sql """select * from ${tableName} order by id;""" - sql """ DROP TABLE IF EXISTS ${tableName} """ + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_partial_update_upsert1" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS AUTO PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_sql """select * from ${tableName} order by id;""" + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'false' + + file 'upsert.csv' + time 10000 // limit inflight 10s + } + sql "sync" + qt_sql """select * from ${tableName} order by id;""" + sql """ DROP TABLE IF EXISTS ${tableName} """ - def tableName2 = "test_partial_update_upsert2" - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE ${tableName2} ( - `id` int(11) NULL, - `name` varchar(10) NULL, - `age` int(11) NULL DEFAULT "20", - `city` varchar(10) NOT NULL DEFAULT "beijing", - `balance` decimalv3(9, 0) NULL, - `last_access_time` datetime NULL - ) ENGINE = OLAP UNIQUE KEY(`id`) - COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) - BUCKETS AUTO PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into ${tableName2} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" - qt_sql """select * from ${tableName2} order by id;""" - streamLoad { - table "${tableName2}" + def tableName2 = "test_partial_update_upsert2" + sql """ DROP TABLE IF EXISTS ${tableName2} """ + sql """ + CREATE TABLE ${tableName2} ( + `id` int(11) NULL, + `name` varchar(10) NULL, + `age` int(11) NULL DEFAULT "20", + `city` varchar(10) NOT NULL DEFAULT "beijing", + `balance` decimalv3(9, 0) NULL, + `last_access_time` datetime NULL + ) ENGINE = OLAP UNIQUE KEY(`id`) + COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) + BUCKETS AUTO PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2", + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName2} values(1,"kevin",18,"shenzhen",400,"2023-07-01 12:00:00");""" + qt_sql """select * from ${tableName2} order by id;""" + streamLoad { + table "${tableName2}" - set 'column_separator', ',' - set 'format', 'csv' - set 'partial_columns', 'true' - set 'columns', 'id,balance,last_access_time' - set 
'strict_mode', 'true' + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'id,balance,last_access_time' + set 'strict_mode', 'true' - file 'upsert.csv' - time 10000 // limit inflight 10s + file 'upsert.csv' + time 10000 // limit inflight 10s - check {result, exception, startTime, endTime -> - assertTrue(exception == null) - def json = parseJson(result) - assertEquals("fail", json.Status.toLowerCase()) + check {result, exception, startTime, endTime -> + assertTrue(exception == null) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + } + } + sql "sync" + sql """ DROP TABLE IF EXISTS ${tableName2} """ } } - sql "sync" - sql """ DROP TABLE IF EXISTS ${tableName2} """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.groovy index db33ec6fe46db8..b853ce2d328295 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_delete_stmt.groovy @@ -17,136 +17,145 @@ // under the License. suite("test_primary_key_partial_update_with_delete_stmt", "p0") { - def tableName = "test_primary_key_partial_update_with_delete_stmt" - def tableNameJoinA = "test_primary_key_partial_update_with_delete_stmt_join_a" - def tableNameJoinB = "test_primary_key_partial_update_with_delete_stmt_join_b" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - // insert 3 lines - sql """ - insert into ${tableName} values(3, "doris3", 3000, 123, 1) - """ - sql """ - insert into ${tableName} values(2, "doris2", 2000, 223, 1) - """ - sql """ - insert into ${tableName} values(1, "doris", 1000, 123, 1) - """ - - //sql """ - // alter table ${tableName} ENABLE FEATURE "BATCH_DELETE" - //""" - - sql "sync" - - // case 1: delete exist key - sql """ - delete from ${tableName} where id = 3 - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // case 2: delete non-exist key - def result1 = sql """ - delete from ${tableName} where id = 4 - """ - - assertTrue(result1.size() == 1) - assertTrue(result1[0].size() == 1) - assertTrue(result1[0][0] == 0, "Query OK, 0 rows affected") - - - // create two tables for join - sql """ - CREATE TABLE ${tableNameJoinA} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - - sql """ - CREATE TABLE ${tableNameJoinB} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `score` int(11) NOT NULL COMMENT "用户得分") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - - // case 3: non-exsit key with join - sql """ - insert into 
${tableNameJoinA} values(4, "doris6") - """ - sql """ - insert into ${tableNameJoinB} values(4, 4000) - """ - def result2 = sql """ - delete from ${tableName} using ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id - where ${tableName}.id = ${tableNameJoinA}.id - """ - assertTrue(result2.size() == 1) - assertTrue(result2[0].size() == 1) - assertTrue(result2[0][0] == 0, "Query OK, 0 rows affected") - - sql "sync" - - // case 4: delete in join - sql """ - insert into ${tableNameJoinA} values(2, "doris4") - """ - - sql """ - insert into ${tableNameJoinA} values(1, "doris3") - """ - - sql """ - insert into ${tableNameJoinB} values(2, 8000) - """ - - sql """ - insert into ${tableNameJoinB} values(3, 7000) - """ - - sql "sync" - - qt_select_before_delete """ - select ${tableName}.id from ${tableName} inner join ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id - and ${tableName}.id = ${tableNameJoinA}.id - """ - - def result3 = sql """ - delete from ${tableName} using ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id - where ${tableName}.id = ${tableNameJoinA}.id - """ - assertTrue(result3.size() == 1) - assertTrue(result3[0].size() == 1) - assertTrue(result3[0][0] == 1, "Query OK, 1 row affected") - - sql "sync" - - qt_select_join """ - select * from ${tableName} order by id; - """ - - - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_with_delete_stmt" + def tableNameJoinA = "test_primary_key_partial_update_with_delete_stmt_join_a" + def tableNameJoinB = "test_primary_key_partial_update_with_delete_stmt_join_b" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ + sql """CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + // insert 3 lines + sql """ + insert into ${tableName} values(3, "doris3", 3000, 123, 1) + """ + sql """ + insert into ${tableName} values(2, "doris2", 2000, 223, 1) + """ + sql """ + insert into ${tableName} values(1, "doris", 1000, 123, 1) + """ + + //sql """ + // alter table ${tableName} ENABLE FEATURE "BATCH_DELETE" + //""" + + sql "sync" + + // case 1: delete exist key + sql """ + delete from ${tableName} where id = 3 + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // case 2: delete non-exist key + def result1 = sql """ + delete from ${tableName} where id = 4 + """ + + assertTrue(result1.size() == 1) + assertTrue(result1[0].size() == 1) + assertTrue(result1[0][0] == 0, "Query OK, 0 rows affected") + + + // create 
two tables for join + sql """CREATE TABLE ${tableNameJoinA} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """CREATE TABLE ${tableNameJoinB} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `score` int(11) NOT NULL COMMENT "用户得分") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + // case 3: non-exist key with join + sql """ + insert into ${tableNameJoinA} values(4, "doris6") + """ + sql """ + insert into ${tableNameJoinB} values(4, 4000) + """ + def result2 = sql """ + delete from ${tableName} using ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id + where ${tableName}.id = ${tableNameJoinA}.id + """ + assertTrue(result2.size() == 1) + assertTrue(result2[0].size() == 1) + assertTrue(result2[0][0] == 0, "Query OK, 0 rows affected") + + sql "sync" + + // case 4: delete in join + sql """ + insert into ${tableNameJoinA} values(2, "doris4") + """ + + sql """ + insert into ${tableNameJoinA} values(1, "doris3") + """ + + sql """ + insert into ${tableNameJoinB} values(2, 8000) + """ + + sql """ + insert into ${tableNameJoinB} values(3, 7000) + """ + + sql "sync" + + qt_select_before_delete """ + select ${tableName}.id from ${tableName} inner join ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id + and ${tableName}.id = ${tableNameJoinA}.id + """ + + def result3 = sql """ + delete from ${tableName} using ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id + where ${tableName}.id = ${tableNameJoinA}.id + """ + assertTrue(result3.size() == 1) + assertTrue(result3[0].size() == 1) + assertTrue(result3[0][0] == 1, "Query OK, 1 row affected") + + sql "sync" + + qt_select_join """ + select * from ${tableName} order by id; + """ + + + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ + } + } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.groovy index 376cc50b92c984..c04c535c080d88 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_with_update_stmt.groovy @@ -17,120 +17,130 @@ // under the License.
suite("test_primary_key_partial_update_with_update_stmt", "p0") { - def tableName = "test_primary_key_partial_update_with_update_stmt" - def tableNameJoinA = "test_primary_key_partial_update_with_update_stmt_join_a" - def tableNameJoinB = "test_primary_key_partial_update_with_update_stmt_join_b" - - // create table - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名", - `score` int(11) NOT NULL COMMENT "用户得分", - `test` int(11) NULL COMMENT "null test", - `dft` int(11) DEFAULT "4321") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - // insert 2 lines - sql """ - insert into ${tableName} values(2, "doris2", 2000, 223, 1) - """ - - sql """ - insert into ${tableName} values(1, "doris", 1000, 123, 1) - """ - - // case 1: partially update normally - sql """ - update ${tableName} set score = 4000 where id = 1 - """ - - sql "sync" - - qt_select_default """ - select * from ${tableName} order by id; - """ - - // case 2: partially update non-exist key - def result1 = sql """ - update ${tableName} set score = 2000 where id = 3 - """ - assertTrue(result1.size() == 1) - assertTrue(result1[0].size() == 1) - assertTrue(result1[0][0] == 0, "Query OK, 0 rows affected") - - sql "sync" - - // create two table for join - sql """ - CREATE TABLE ${tableNameJoinA} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `name` varchar(65533) NOT NULL COMMENT "用户姓名") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - - sql """ - CREATE TABLE ${tableNameJoinB} ( - `id` int(11) NOT NULL COMMENT "用户 ID", - `score` int(11) NOT NULL COMMENT "用户得分") - UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true") - """ - - // case 3: non-exsit key with join - sql """ - insert into ${tableNameJoinA} values(4, "doris6") - """ - sql """ - insert into ${tableNameJoinB} values(4, 4000) - """ - def result2 = sql """ - update ${tableName} set ${tableName}.score = ${tableNameJoinB}.score, ${tableName}.name = ${tableNameJoinA}.name - from ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id - where ${tableName}.id = ${tableNameJoinA}.id - """ - assertTrue(result2.size() == 1) - assertTrue(result2[0].size() == 1) - assertTrue(result2[0][0] == 0, "Query OK, 0 rows affected") - - sql "sync" - - // case 4: partially update normally with join - sql """ - insert into ${tableNameJoinA} values(2, "doris4") - """ - - sql """ - insert into ${tableNameJoinA} values(1, "doris3") - """ - - sql """ - insert into ${tableNameJoinB} values(2, 8000) - """ - - sql """ - insert into ${tableNameJoinB} values(3, 7000) - """ - - sql """ - update ${tableName} set ${tableName}.score = ${tableNameJoinB}.score, ${tableName}.name = ${tableNameJoinA}.name - from ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id - where ${tableName}.id = ${tableNameJoinA}.id - """ - - sql "sync" - - qt_select_join """ - select * from ${tableName} order by id; - """ - - - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ - sql """ DROP TABLE IF EXISTS ${tableNameJoinB} 
""" + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_primary_key_partial_update_with_update_stmt" + def tableNameJoinA = "test_primary_key_partial_update_with_update_stmt_join_a" + def tableNameJoinB = "test_primary_key_partial_update_with_update_stmt_join_b" + + // create table + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名", + `score` int(11) NOT NULL COMMENT "用户得分", + `test` int(11) NULL COMMENT "null test", + `dft` int(11) DEFAULT "4321") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + // insert 2 lines + sql """ + insert into ${tableName} values(2, "doris2", 2000, 223, 1) + """ + + sql """ + insert into ${tableName} values(1, "doris", 1000, 123, 1) + """ + + // case 1: partially update normally + sql """ + update ${tableName} set score = 4000 where id = 1 + """ + + sql "sync" + + qt_select_default """ + select * from ${tableName} order by id; + """ + + // case 2: partially update non-exist key + def result1 = sql """ + update ${tableName} set score = 2000 where id = 3 + """ + assertTrue(result1.size() == 1) + assertTrue(result1[0].size() == 1) + assertTrue(result1[0][0] == 0, "Query OK, 0 rows affected") + + sql "sync" + + // create two table for join + sql """CREATE TABLE ${tableNameJoinA} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `name` varchar(65533) NOT NULL COMMENT "用户姓名") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql """CREATE TABLE ${tableNameJoinB} ( + `id` int(11) NOT NULL COMMENT "用户 ID", + `score` int(11) NOT NULL COMMENT "用户得分") + UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + // case 3: non-exsit key with join + sql """ + insert into ${tableNameJoinA} values(4, "doris6") + """ + sql """ + insert into ${tableNameJoinB} values(4, 4000) + """ + def result2 = sql """ + update ${tableName} set ${tableName}.score = ${tableNameJoinB}.score, ${tableName}.name = ${tableNameJoinA}.name + from ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id + where ${tableName}.id = ${tableNameJoinA}.id + """ + assertTrue(result2.size() == 1) + assertTrue(result2[0].size() == 1) + assertTrue(result2[0][0] == 0, "Query OK, 0 rows affected") + + sql "sync" + + // case 4: partially update normally with join + sql """ + insert into ${tableNameJoinA} values(2, "doris4") + """ + + sql """ + insert into ${tableNameJoinA} values(1, "doris3") + """ + + sql """ + insert into ${tableNameJoinB} values(2, 8000) + """ + + sql """ + insert into ${tableNameJoinB} values(3, 7000) + """ + + sql """ + update ${tableName} set ${tableName}.score = ${tableNameJoinB}.score, ${tableName}.name = 
${tableNameJoinA}.name + from ${tableNameJoinA} inner join ${tableNameJoinB} on ${tableNameJoinA}.id = ${tableNameJoinB}.id + where ${tableName}.id = ${tableNameJoinA}.id + """ + + sql "sync" + + qt_select_join """ + select * from ${tableName} order by id; + """ + + + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinA} """ + sql """ DROP TABLE IF EXISTS ${tableNameJoinB} """ + } + } }
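Every suite touched by this patch is rewrapped with the same parameterization: run the unchanged test body once with the column store and once with the row store. A minimal sketch of that pattern, distilled from the suites above, is shown here; the suite name and table are illustrative placeholders, while suite, connect, logger, sql, and context.config are the regression-test framework helpers already used throughout this patch.

suite("example_partial_update_row_store_sketch", "p0") {
    String db = context.config.getDbNameByFile(context.file)
    sql "select 1;" // to create database

    // Run the identical test body twice: once against the pure column store,
    // once with the row store enabled via the "store_row_column" property.
    for (def use_row_store : [false, true]) {
        logger.info("current params: use_row_store: ${use_row_store}")

        connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) {
            sql "use ${db};"
            sql "DROP TABLE IF EXISTS example_tbl"
            // Only "store_row_column" varies between the two iterations; every
            // other table property stays as in the original test.
            sql """CREATE TABLE example_tbl (`id` int(11) NOT NULL, `score` int(11) NOT NULL)
                   UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1
                   PROPERTIES("replication_num" = "1",
                       "enable_unique_key_merge_on_write" = "true",
                       "store_row_column" = "${use_row_store}"); """
            // ... test body (inserts, stream loads, qt_* result checks) goes here ...
            sql "DROP TABLE IF EXISTS example_tbl"
        }
    }
}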