Skip to content

Commit

Permalink
tests(ticdc): add lossy_ddl test (#8859) (#8986)
Browse files Browse the repository at this point in the history
close #8686
  • Loading branch information
ti-chi-bot authored May 20, 2023
1 parent fd068cb commit 1881877
Show file tree
Hide file tree
Showing 2 changed files with 246 additions and 0 deletions.
187 changes: 187 additions & 0 deletions tests/integration_tests/lossy_ddl/data/prepare.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
-- Fixture for the lossy_ddl integration test: every ALTER below is a
-- potentially lossy schema change; the test asserts none of them causes
-- the changefeed to re-emit row data.
DROP DATABASE IF EXISTS `lossy_ddl`;
CREATE DATABASE `lossy_ddl`;
USE `lossy_ddl`;

-- example1: INT -> INT UNSIGNED (value is non-negative, so it survives).
CREATE TABLE example1 (
    id INT PRIMARY KEY,
    b INT
);
INSERT INTO example1 (id, b) VALUES (1, 1);
ALTER TABLE example1 MODIFY COLUMN b INT UNSIGNED;

-- example2: INT -> VARCHAR(100).
CREATE TABLE example2 (
    id INT PRIMARY KEY,
    b INT
);
INSERT INTO example2 (id, b) VALUES (1, 1);
ALTER TABLE example2 MODIFY COLUMN b VARCHAR(100);

-- example3: TIMESTAMP -> DATETIME.
CREATE TABLE example3 (
    id INT PRIMARY KEY,
    b TIMESTAMP
);
INSERT INTO example3 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example3 MODIFY COLUMN b DATETIME;

-- example4: VARCHAR(256) -> VARCHAR(100) (shrink; stored value still fits).
CREATE TABLE example4 (
    id INT PRIMARY KEY,
    b VARCHAR(256)
);
INSERT INTO example4 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example4 MODIFY COLUMN b VARCHAR(100);

-- example5: drop a column.
CREATE TABLE example5 (
    id INT PRIMARY KEY,
    b INT
);
INSERT INTO example5 (id, b) VALUES (1, -1);
ALTER TABLE example5 DROP COLUMN b;

-- example6: add a column.
CREATE TABLE example6 (
    id INT PRIMARY KEY,
    b INT
);
INSERT INTO example6 (id, b) VALUES (1, -1);
ALTER TABLE example6 ADD COLUMN c INT;

-- example7: change character set and collation.
CREATE TABLE example7 (
    id INT PRIMARY KEY,
    b VARCHAR(256) CHARACTER SET utf8 COLLATE utf8_general_ci
);
INSERT INTO example7 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example7 MODIFY COLUMN b VARCHAR(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;

-- example8: drop a partition (discards the rows in partition b0).
CREATE TABLE example8 (
    id INT PRIMARY KEY,
    b INT
) PARTITION BY RANGE (id) (
    PARTITION b0 VALUES LESS THAN (0),
    PARTITION b1 VALUES LESS THAN MAXVALUE
);
INSERT INTO example8 (id, b) VALUES (-1, -1);
INSERT INTO example8 (id, b) VALUES (1, 1);
ALTER TABLE example8 DROP PARTITION b0;

-- example9: VARCHAR(256) -> VARCHAR(300) (widen).
CREATE TABLE example9 (
    id INT PRIMARY KEY,
    b VARCHAR(256)
);
INSERT INTO example9 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example9 MODIFY COLUMN b VARCHAR(300);

-- example10: DOUBLE -> FLOAT (precision loss possible).
CREATE TABLE example10 (
    id INT PRIMARY KEY,
    b DOUBLE
);
INSERT INTO example10 (id, b) VALUES (1, 1.0);
ALTER TABLE example10 MODIFY COLUMN b FLOAT;

-- example11: BIGINT -> INT (range narrowing).
CREATE TABLE example11 (
    id BIGINT PRIMARY KEY,
    b BIGINT
);
INSERT INTO example11 (id, b) VALUES (1, 1);
ALTER TABLE example11 MODIFY COLUMN b INT;

-- example12: LONGTEXT -> VARCHAR(100).
CREATE TABLE example12 (
    id INT PRIMARY KEY,
    b LONGTEXT
);
INSERT INTO example12 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example12 MODIFY COLUMN b VARCHAR(100);

-- example13: ENUM('a','b','c') -> ENUM('a','b') (removes a member).
CREATE TABLE example13 (
    id INT PRIMARY KEY,
    b ENUM('a', 'b', 'c')
);
INSERT INTO example13 (id, b) VALUES (1, 'a');
ALTER TABLE example13 MODIFY COLUMN b ENUM('a', 'b');

-- Relax sql_mode (drop STRICT_TRANS_TABLES) so the genuinely lossy
-- conversions below are accepted instead of rejected.
SET
@@SESSION.sql_mode = 'NO_ENGINE_SUBSTITUTION';

-- example14: VARCHAR(256) -> VARCHAR(1) while a longer value is stored (truncates).
CREATE TABLE example14 (
    id INT PRIMARY KEY,
    b VARCHAR(256)
);
INSERT INTO example14 (id, b) VALUES (1, '2023-04-19 11:48:00');
ALTER TABLE example14 MODIFY COLUMN b VARCHAR(1);

-- example15: INT -> INT UNSIGNED while a negative value is stored (clamps).
CREATE TABLE example15 (
    id INT PRIMARY KEY,
    b INT
);
INSERT INTO example15 (id, b) VALUES (1, -1);
ALTER TABLE example15 MODIFY COLUMN b INT UNSIGNED;

-- example16/example17: EXCHANGE PARTITION swaps partition p2 with a plain table.
CREATE TABLE example16 (
    a INT PRIMARY KEY
) PARTITION BY RANGE (a) (
    PARTITION p0 VALUES LESS THAN (6),
    PARTITION p1 VALUES LESS THAN (11),
    PARTITION p2 VALUES LESS THAN (21)
);
INSERT INTO example16 VALUES (1);
CREATE TABLE example17 (
    a INT PRIMARY KEY
);
INSERT INTO example17 VALUES (18);
ALTER TABLE example16 EXCHANGE PARTITION p2 WITH TABLE example17;

-- Sentinel table: its DDL appearing in the sink log marks the end of the fixture.
CREATE TABLE `finish_mark` (
    id INT PRIMARY KEY,
    name VARCHAR(20)
);
59 changes: 59 additions & 0 deletions tests/integration_tests/lossy_ddl/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
#!/bin/bash

# Exit on any command failure (-e) and on use of unset variables (-u).
set -eu

# Absolute directory of this script, so it can be invoked from anywhere.
CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Shared helpers: run_sql_file, start_tidb_cluster, ensure, etc.
# NOTE(review): OUT_DIR, TEST_NAME, UP_* are presumably exported by the test
# harness / test_prepare — confirm against the framework.
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
# First CLI argument selects the sink type (e.g. kafka, mysql).
SINK_TYPE=$1

# Retry budget for the ensure() polling helper below.
MAX_RETRIES=10
# Because we want the lossy DDL to not cause any data updates, we can check the
# data written to the blackhole sink to see whether any row was re-emitted.
#   $1 - cdc server work directory containing cdc.log
# Exits 1 when the finish mark is missing or the row-log count is wrong.
function check_lossy_ddl() {
	# The finish_mark DDL must appear exactly once in the log: it proves the
	# whole prepare.sql fixture has been replicated.
	is_finish_mark_exist=$(grep "BlackHoleSink: DDL Event" "$1/cdc.log" | grep -c "finish_mark")
	if [[ "$is_finish_mark_exist" -ne 1 ]]; then
		echo "can't find finish mark"
		exit 1
	fi

	# Dump the row logs for debugging; quoted so multi-line output survives.
	row_logs=$(grep "BlackHoleSink: WriteEvents" "$1/cdc.log")
	echo "$row_logs"
	# Exactly 18 row events come from the INSERTs in prepare.sql; any extra
	# event means a lossy DDL incorrectly triggered a data update.
	row_logs_count=$(grep "BlackHoleSink: WriteEvents" -c "$1/cdc.log")
	if [[ "$row_logs_count" -ne 18 ]]; then
		echo "can't find 18 row logs, got $row_logs_count"
		exit 1
	fi
}

export -f check_lossy_ddl

# Main test body: start a TiDB cluster and a cdc server, create a blackhole
# changefeed, replay the lossy-DDL fixture upstream, then verify via the cdc
# log that no row data was re-emitted.
function run() {
# Use blackhole sink to check if the DDL is lossy.
# So no need to run this test for other sinks.
# NOTE(review): only the kafka test group executes this case; the sink
# actually used is blackhole regardless.
if [ "$SINK_TYPE" != "kafka" ]; then
return
fi

# Fresh work directory for this run.
rm -rf $WORK_DIR && mkdir -p $WORK_DIR
start_tidb_cluster --workdir $WORK_DIR
cd $WORK_DIR

pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1"
# Blackhole sink discards data but logs DDL/row events to cdc.log.
SINK_URI="blackhole://"

run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr
cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI"

# Apply all the lossy DDL statements upstream.
run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}

# Poll (up to MAX_RETRIES) until the log shows the finish mark and the
# expected row-event count.
ensure $MAX_RETRIES check_lossy_ddl $WORK_DIR
cleanup_process $CDC_BINARY
}

# Always tear the cluster down, even on failure.
trap stop_tidb_cluster EXIT
# Forward all CLI arguments (the first is the sink type).
run $*
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"

0 comments on commit 1881877

Please sign in to comment.