[cherry-pick][test](doc) add table design example in doris's doc to regression test (#42444) (#43023)

(cherry picked from commit 051128f)

### What problem does this PR solve?
<!--
You need to clearly describe your PR in this part:

1. What problem was fixed (it's best to include specific error reporting
information). How it was fixed.
2. Which behaviors were modified. What was the previous behavior, what
is it now, why was it modified, and what possible impacts might there
be.
3. What features were added. Why this function was added.
4. Which codes were refactored and why this part of the code was
refactored.
5. Which functions were optimized and what is the difference before and
after the optimization.

The description of the PR needs to enable reviewers to quickly and
clearly understand the logic of the code modification.
-->

<!--
If there are related issues, please fill in the issue number.
- If you want the issue to be closed after the PR is merged, please use
"close #12345". Otherwise, use "ref #12345"
-->
Issue Number: close #xxx

<!--
If this PR is a follow-up to a previous PR, for example, fixing a bug
introduced by a related PR,
link the PR here
-->
Related PR: #42444

Problem Summary:

### Check List (For Committer)

- Test <!-- At least one of them must be included. -->

    - [x] Regression test
    - [ ] Unit Test
    - [ ] Manual test (add detailed scripts or steps below)
    - [ ] No need to test or manual test. Explain why:
        - [ ] This is a refactor/code format and no logic has been changed.
        - [ ] Previous test can cover this change.
        - [ ] No code files have been changed.
        - [ ] Other reason <!-- Add your reason?  -->

- Behavior changed:

    - [x] No.
    - [ ] Yes. <!-- Explain the behavior change -->

- Does this need documentation?

    - [x] No.
    - [ ] Yes. <!-- Add document PR link here. eg: apache/doris-website#1214 -->

- Release note

    <!-- bugfix, feat, behavior changed need a release note -->
    <!-- Add one line release note for this PR. -->
    None

### Check List (For Reviewer who merge this PR)

- [ ] Confirm the release note
- [ ] Confirm test cases
- [ ] Confirm document
- [ ] Add branch pick label <!-- Add branch pick label that this PR should merge into -->
yagagagaga authored Nov 7, 2024
1 parent 6b86e56 commit ab748ef
Showing 14 changed files with 1,610 additions and 0 deletions.
@@ -971,6 +971,31 @@ class Suite implements GroovyInterceptable {
        Assert.assertEquals(0, code)
    }

    String cmd(String cmd, int timeoutSecond = 0) {
        var processBuilder = new ProcessBuilder()
        processBuilder.command("/bin/bash", "-c", cmd)
        var process = processBuilder.start()
        def outBuf = new StringBuilder()
        def errBuf = new StringBuilder()
        // capture stdout and stderr into the buffers in background threads
        process.consumeProcessOutput(outBuf, errBuf)
        // echo stdout to the console as the command runs
        var reader = new BufferedReader(new InputStreamReader(process.getInputStream()))
        String line
        while ((line = reader.readLine()) != null) {
            System.out.println(line)
        }
        // wait until cmd finishes, killing it after the timeout if one was given
        if (timeoutSecond > 0) {
            process.waitForOrKill(timeoutSecond * 1000)
        } else {
            process.waitFor()
        }
        // fail loudly on a non-zero exit code, surfacing stderr
        if (process.exitValue() != 0) {
            println outBuf
            throw new RuntimeException(errBuf.toString())
        }
        return outBuf.toString()
    }

    void sshExec(String username, String host, String cmd, boolean alert=true) {
        String command = "ssh ${username}@${host} '${cmd}'"
        def cmds = ["/bin/bash", "-c", command]
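The hunk above adds a `cmd` helper to the regression framework's Suite class so that doc suites can shell out, for example to run the curl-based stream load used in the auto-increment suite below. A minimal usage sketch, assuming a hypothetical suite name and curl target (neither is part of this commit):

    // Hypothetical sketch, not part of this commit: run a shell command from a suite,
    // kill it after 60 seconds, and fail on a non-zero exit code.
    suite("docs/example-shell-usage") {
        // cmd() echoes stdout while the command runs, returns the captured stdout,
        // and throws a RuntimeException carrying stderr if the command fails.
        def out = cmd("curl -sS http://${context.config.feHttpAddress}/api/bootstrap", 60)
        println "bootstrap response: ${out}"
    }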
296 changes: 296 additions & 0 deletions regression-test/suites/doc/table-design/auto-increment.md.groovy
@@ -0,0 +1,296 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

import org.junit.jupiter.api.Assertions;

suite("docs/table-design/auto-increment.md") {
try {
multi_sql "create database if not exists demo; use demo;"
sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`value` BIGINT NOT NULL
) ENGINE=OLAP
DUPLICATE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`id` BIGINT NOT NULL AUTO_INCREMENT(100),
`value` BIGINT NOT NULL
) ENGINE=OLAP
DUPLICATE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`uid` BIGINT NOT NULL,
`name` BIGINT NOT NULL,
`id` BIGINT NOT NULL AUTO_INCREMENT,
`value` BIGINT NOT NULL
) ENGINE=OLAP
DUPLICATE KEY(`uid`, `name`)
DISTRIBUTED BY HASH(`uid`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`name` varchar(65533) NOT NULL,
`value` int(11) NOT NULL
) ENGINE=OLAP
UNIQUE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`text` varchar(65533) NOT NULL,
`id` BIGINT NOT NULL AUTO_INCREMENT,
) ENGINE=OLAP
UNIQUE KEY(`text`)
DISTRIBUTED BY HASH(`text`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`text` varchar(65533) NOT NULL,
`id` BIGINT NOT NULL AUTO_INCREMENT,
) ENGINE=OLAP
UNIQUE KEY(`text`)
DISTRIBUTED BY HASH(`text`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""

sql "drop table if exists `demo`.`tbl`"
sql """
CREATE TABLE `demo`.`tbl` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`name` varchar(65533) NOT NULL,
`value` int(11) NOT NULL
) ENGINE=OLAP
UNIQUE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""
multi_sql """
insert into tbl(name, value) values("Bob", 10), ("Alice", 20), ("Jack", 30);
select * from tbl order by id;
"""

cmd """
curl --location-trusted -u ${context.config.jdbcUser}:${context.config.jdbcPassword} -H "columns:name,value" -H "column_separator:," -T ${context.file.parent}/test_data/test.csv http://${context.config.feHttpAddress}/api/demo/tbl/_stream_load
"""
sql "select * from tbl order by id"

multi_sql """
insert into tbl(id, name, value) values(null, "Doris", 60), (null, "Nereids", 70);
select * from tbl order by id;
"""

sql "drop table if exists `demo`.`tbl2`"
multi_sql """
CREATE TABLE `demo`.`tbl2` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`name` varchar(65533) NOT NULL,
`value` int(11) NOT NULL DEFAULT "0"
) ENGINE=OLAP
UNIQUE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"enable_unique_key_merge_on_write" = "true"
);
insert into tbl2(id, name, value) values(1, "Bob", 10), (2, "Alice", 20), (3, "Jack", 30);
select * from tbl2 order by id;
set enable_unique_key_partial_update=true;
set enable_insert_strict=false;
insert into tbl2(id, name) values(1, "modified"), (4, "added");
select * from tbl2 order by id;
"""

sql "drop table if exists `demo`.`tbl3`"
multi_sql """
CREATE TABLE `demo`.`tbl3` (
`id` BIGINT NOT NULL,
`name` varchar(100) NOT NULL,
`score` BIGINT NOT NULL,
`aid` BIGINT NOT NULL AUTO_INCREMENT
) ENGINE=OLAP
UNIQUE KEY(`id`)
DISTRIBUTED BY HASH(`id`) BUCKETS 1
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"enable_unique_key_merge_on_write" = "true"
);
insert into tbl3(id, name, score) values(1, "Doris", 100), (2, "Nereids", 200), (3, "Bob", 300);
select * from tbl3 order by id;
set enable_unique_key_partial_update=true;
set enable_insert_strict=false;
insert into tbl3(id, score) values(1, 999), (2, 888);
select * from tbl3 order by id;
insert into tbl3(id, aid) values(1, 1000), (3, 500);
select * from tbl3 order by id;
"""

sql "drop table if exists `demo`.`dwd_dup_tbl`"
sql """
CREATE TABLE `demo`.`dwd_dup_tbl` (
`user_id` varchar(50) NOT NULL,
`dim1` varchar(50) NOT NULL,
`dim2` varchar(50) NOT NULL,
`dim3` varchar(50) NOT NULL,
`dim4` varchar(50) NOT NULL,
`dim5` varchar(50) NOT NULL,
`visit_time` DATE NOT NULL
) ENGINE=OLAP
DUPLICATE KEY(`user_id`)
DISTRIBUTED BY HASH(`user_id`) BUCKETS 32
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""
sql "drop table if exists `demo`.`dictionary_tbl`"
sql """
CREATE TABLE `demo`.`dictionary_tbl` (
`user_id` varchar(50) NOT NULL,
`aid` BIGINT NOT NULL AUTO_INCREMENT
) ENGINE=OLAP
UNIQUE KEY(`user_id`)
DISTRIBUTED BY HASH(`user_id`) BUCKETS 32
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"enable_unique_key_merge_on_write" = "true"
)
"""
sql """
insert into dictionary_tbl(user_id)
select user_id from dwd_dup_tbl group by user_id
"""
sql """
insert into dictionary_tbl(user_id)
select dwd_dup_tbl.user_id from dwd_dup_tbl left join dictionary_tbl
on dwd_dup_tbl.user_id = dictionary_tbl.user_id where dwd_dup_tbl.visit_time > '2023-12-10' and dictionary_tbl.user_id is NULL
"""
sql "drop table if exists `demo`.`dws_agg_tbl`"
sql """
CREATE TABLE `demo`.`dws_agg_tbl` (
`dim1` varchar(50) NOT NULL,
`dim3` varchar(50) NOT NULL,
`dim5` varchar(50) NOT NULL,
`user_id_bitmap` BITMAP BITMAP_UNION NOT NULL,
`pv` BIGINT SUM NOT NULL
) ENGINE=OLAP
AGGREGATE KEY(`dim1`,`dim3`,`dim5`)
DISTRIBUTED BY HASH(`dim1`) BUCKETS 32
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""
sql """
insert into dws_agg_tbl
select dwd_dup_tbl.dim1, dwd_dup_tbl.dim3, dwd_dup_tbl.dim5, BITMAP_UNION(TO_BITMAP(dictionary_tbl.aid)), COUNT(1)
from dwd_dup_tbl INNER JOIN dictionary_tbl on dwd_dup_tbl.user_id = dictionary_tbl.user_id
group by dwd_dup_tbl.dim1, dwd_dup_tbl.dim3, dwd_dup_tbl.dim5
"""
sql """
select dim1, dim3, dim5, bitmap_count(user_id_bitmap) as uv, pv from dws_agg_tbl
"""

sql "drop table if exists `demo`.`records_tbl`"
sql """
CREATE TABLE `demo`.`records_tbl` (
`user_id` int(11) NOT NULL COMMENT "",
`name` varchar(26) NOT NULL COMMENT "",
`address` varchar(41) NOT NULL COMMENT "",
`city` varchar(11) NOT NULL COMMENT "",
`nation` varchar(16) NOT NULL COMMENT "",
`region` varchar(13) NOT NULL COMMENT "",
`phone` varchar(16) NOT NULL COMMENT "",
`mktsegment` varchar(11) NOT NULL COMMENT ""
) DUPLICATE KEY (`user_id`, `name`)
DISTRIBUTED BY HASH(`user_id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""
sql "select * from records_tbl order by user_id, name limit 100"
sql "select * from records_tbl order by user_id, name limit 100 offset 100"

sql "drop table if exists `demo`.`records_tbl2`"
sql """
CREATE TABLE `demo`.`records_tbl2` (
`user_id` int(11) NOT NULL COMMENT "",
`name` varchar(26) NOT NULL COMMENT "",
`address` varchar(41) NOT NULL COMMENT "",
`city` varchar(11) NOT NULL COMMENT "",
`nation` varchar(16) NOT NULL COMMENT "",
`region` varchar(13) NOT NULL COMMENT "",
`phone` varchar(16) NOT NULL COMMENT "",
`mktsegment` varchar(11) NOT NULL COMMENT "",
`unique_value` BIGINT NOT NULL AUTO_INCREMENT
) DUPLICATE KEY (`user_id`, `name`)
DISTRIBUTED BY HASH(`user_id`) BUCKETS 10
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
)
"""
sql "select * from records_tbl2 order by unique_value limit 100"
sql "select * from records_tbl2 where unique_value > 99 order by unique_value limit 100"
sql """
select user_id, name, address, city, nation, region, phone, mktsegment
from records_tbl2, (select unique_value as max_value from records_tbl2 order by unique_value limit 1 offset 9999) as previous_data
where records_tbl2.unique_value > previous_data.max_value
order by unique_value limit 100
"""
} catch (Throwable t) {
Assertions.fail("examples in docs/table-design/auto-increment.md failed to exec, please fix it", t)
}
}
