From 758eb74d4b255d1f3ef40bdefb88e16fef08eaa5 Mon Sep 17 00:00:00 2001
From: lidezhu <47731263+lidezhu@users.noreply.github.com>
Date: Wed, 23 Oct 2024 14:51:57 +0800
Subject: [PATCH 1/2] This is an automated cherry-pick of #11678

Signed-off-by: ti-chi-bot
---
 tests/integration_tests/run_group.sh | 5 ++
 .../safe_mode/conf/diff_config.toml | 29 +++++++++++
 .../safe_mode/data/create_table.sql | 5 ++
 .../safe_mode/data/insert.sql | 3 ++
 .../safe_mode/data/update.sql | 4 ++
 tests/integration_tests/safe_mode/run.sh | 51 +++++++++++++++++++
 6 files changed, 97 insertions(+)
 create mode 100644 tests/integration_tests/safe_mode/conf/diff_config.toml
 create mode 100644 tests/integration_tests/safe_mode/data/create_table.sql
 create mode 100644 tests/integration_tests/safe_mode/data/insert.sql
 create mode 100644 tests/integration_tests/safe_mode/data/update.sql
 create mode 100755 tests/integration_tests/safe_mode/run.sh

diff --git a/tests/integration_tests/run_group.sh b/tests/integration_tests/run_group.sh
index abdfee3cd85..891ad462557 100755
--- a/tests/integration_tests/run_group.sh
+++ b/tests/integration_tests/run_group.sh
@@ -10,8 +10,13 @@ group=$2
 # Other tests that only support mysql: batch_update_to_no_batch ddl_reentrant
 # changefeed_fast_fail changefeed_resume_with_checkpoint_ts sequence
 # multi_cdc_cluster capture_suicide_while_balance_table
+<<<<<<< HEAD
 mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint server_config_compatibility changefeed_dup_error_restart"
 mysql_only_http="http_api http_api_tls api_v2"
+=======
+mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint syncpoint_check_ts server_config_compatibility changefeed_dup_error_restart safe_mode"
+mysql_only_http="http_api http_api_tls api_v2 http_api_tls_with_user_auth cli_tls_with_auth"
+>>>>>>> ade045ed8c (*(ticdc): add integration test for safe mode (#11678))
 mysql_only_consistent_replicate="consistent_replicate_ddl consistent_replicate_gbk consistent_replicate_nfs consistent_replicate_storage_file consistent_replicate_storage_file_large_value consistent_replicate_storage_s3 consistent_partition_table"
 
 kafka_only="kafka_big_messages kafka_compression kafka_messages kafka_sink_error_resume mq_sink_lost_callback mq_sink_dispatcher kafka_column_selector kafka_column_selector_avro"
diff --git a/tests/integration_tests/safe_mode/conf/diff_config.toml b/tests/integration_tests/safe_mode/conf/diff_config.toml
new file mode 100644
index 00000000000..7bf5d3ffe98
--- /dev/null
+++ b/tests/integration_tests/safe_mode/conf/diff_config.toml
@@ -0,0 +1,29 @@
+# diff Configuration.
+
+check-thread-count = 4
+
+export-fix-sql = true
+
+check-struct-only = false
+
+[task]
+    output-dir = "/tmp/tidb_cdc_test/safe_mode/sync_diff/output"
+
+    source-instances = ["mysql1"]
+
+    target-instance = "tidb0"
+
+    target-check-tables = ["safe_mode.t"]
+
+[data-sources]
+[data-sources.mysql1]
+    host = "127.0.0.1"
+    port = 4000
+    user = "root"
+    password = ""
+
+[data-sources.tidb0]
+    host = "127.0.0.1"
+    port = 3306
+    user = "root"
+    password = ""
diff --git a/tests/integration_tests/safe_mode/data/create_table.sql b/tests/integration_tests/safe_mode/data/create_table.sql
new file mode 100644
index 00000000000..9ed09d75ae4
--- /dev/null
+++ b/tests/integration_tests/safe_mode/data/create_table.sql
@@ -0,0 +1,5 @@
+drop database if exists `safe_mode`;
+create database `safe_mode`;
+use `safe_mode`;
+
+create table t(id int key, a varchar(200));
diff --git a/tests/integration_tests/safe_mode/data/insert.sql b/tests/integration_tests/safe_mode/data/insert.sql
new file mode 100644
index 00000000000..2cd44003173
--- /dev/null
+++ b/tests/integration_tests/safe_mode/data/insert.sql
@@ -0,0 +1,3 @@
+use `safe_mode`;
+insert into t values(1, "hello");
+insert into t values(2, "world");
diff --git a/tests/integration_tests/safe_mode/data/update.sql b/tests/integration_tests/safe_mode/data/update.sql
new file mode 100644
index 00000000000..97963570c7d
--- /dev/null
+++ b/tests/integration_tests/safe_mode/data/update.sql
@@ -0,0 +1,4 @@
+use `safe_mode`;
+-- update non key column
+update t set a = "hello2" where id = 1;
+update t set a = "world2" where id = 2;
diff --git a/tests/integration_tests/safe_mode/run.sh b/tests/integration_tests/safe_mode/run.sh
new file mode 100755
index 00000000000..eac8e3c8352
--- /dev/null
+++ b/tests/integration_tests/safe_mode/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+set -eu
+
+CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+source $CUR/../_utils/test_prepare
+WORK_DIR=$OUT_DIR/$TEST_NAME
+CDC_BINARY=cdc.test
+SINK_TYPE=$1
+
+function run() {
+	if [ "$SINK_TYPE" != "mysql" ]; then
+		return
+	fi
+
+	rm -rf $WORK_DIR && mkdir -p $WORK_DIR
+
+	start_tidb_cluster --workdir $WORK_DIR
+
+	cd $WORK_DIR
+
+	run_sql_file $CUR/data/create_table.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
+	run_sql_file $CUR/data/create_table.sql ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
+
+	# insert data into upstream but not downstream
+	run_sql_file $CUR/data/insert.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
+
+	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
+
+	case $SINK_TYPE in
+	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?safe-mode=true" ;;
+	esac
+	run_cdc_cli changefeed create --sink-uri="$SINK_URI"
+
+	# test update sql can be split into delete + replace at all times in safe mode
+	# otherwise the update sql will have no effect on the downstream and the downstream will have no data.
+	sleep 10
+	run_sql_file $CUR/data/update.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
+
+	run_sql "CREATE TABLE safe_mode.finish_mark (a int primary key);"
+	sleep 30
+	check_table_exists "safe_mode.finish_mark" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT} 60
+	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml
+
+	cleanup_process $CDC_BINARY
+}
+
+trap stop_tidb_cluster EXIT
+run $*
+check_logs $WORK_DIR
+echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"

From b8f4a88d29d5dc00b79f9ff8aee590e3cd974f96 Mon Sep 17 00:00:00 2001
From: lidezhu <47731263+lidezhu@users.noreply.github.com>
Date: Wed, 23 Oct 2024 14:58:53 +0800
Subject: [PATCH 2/2] Update run_group.sh

---
 tests/integration_tests/run_group.sh | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tests/integration_tests/run_group.sh b/tests/integration_tests/run_group.sh
index 891ad462557..d7fb39a03e1 100755
--- a/tests/integration_tests/run_group.sh
+++ b/tests/integration_tests/run_group.sh
@@ -10,13 +10,8 @@ group=$2
 # Other tests that only support mysql: batch_update_to_no_batch ddl_reentrant
 # changefeed_fast_fail changefeed_resume_with_checkpoint_ts sequence
 # multi_cdc_cluster capture_suicide_while_balance_table
-<<<<<<< HEAD
-mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint server_config_compatibility changefeed_dup_error_restart"
+mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint server_config_compatibility changefeed_dup_error_restart safe_mode"
 mysql_only_http="http_api http_api_tls api_v2"
-=======
-mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint syncpoint_check_ts server_config_compatibility changefeed_dup_error_restart safe_mode"
-mysql_only_http="http_api http_api_tls api_v2 http_api_tls_with_user_auth cli_tls_with_auth"
->>>>>>> ade045ed8c (*(ticdc): add integration test for safe mode (#11678))
 mysql_only_consistent_replicate="consistent_replicate_ddl consistent_replicate_gbk consistent_replicate_nfs consistent_replicate_storage_file consistent_replicate_storage_file_large_value consistent_replicate_storage_s3 consistent_partition_table"
 
 kafka_only="kafka_big_messages kafka_compression kafka_messages kafka_sink_error_resume mq_sink_lost_callback mq_sink_dispatcher kafka_column_selector kafka_column_selector_avro"