diff --git a/syncer/ghost.go b/syncer/ghost.go index 6cd2b51c71..ef7ac6f667 100644 --- a/syncer/ghost.go +++ b/syncer/ghost.go @@ -14,7 +14,6 @@ package syncer import ( - "fmt" "strings" "github.com/pingcap/errors" @@ -127,16 +126,6 @@ func (g *Ghost) Apply(tables []*filter.Table, statement string, stmt ast.StmtNod return nil, schema, table, nil } -// InOnlineDDL implements interface -func (g *Ghost) InOnlineDDL(schema, table string) bool { - if g == nil { - return false - } - - ghostInfo := g.storge.Get(schema, table) - return ghostInfo != nil -} - // Finish implements interface func (g *Ghost) Finish(schema, table string) error { if g == nil { @@ -172,25 +161,6 @@ func (g *Ghost) RealName(schema, table string) (string, string) { return schema, table } -// GhostName implements interface -func (g *Ghost) GhostName(schema, table string) (string, string) { - tp := g.TableType(table) - if tp == ghostTable { - return schema, table - } - - if tp == trashTable { - table = table[1 : len(table)-4] - } - - return schema, fmt.Sprintf("_%s_gho", table) -} - -// SchemeName implements interface -func (g *Ghost) SchemeName() string { - return config.GHOST -} - // Clear clears online ddl information func (g *Ghost) Clear() error { return errors.Trace(g.storge.Clear()) diff --git a/syncer/online_ddl.go b/syncer/online_ddl.go index 318f0d695c..19ae82e3f1 100644 --- a/syncer/online_ddl.go +++ b/syncer/online_ddl.go @@ -43,18 +43,12 @@ type OnlinePlugin interface { // * apply online ddl on real table // returns sqls, replaced/self schema, repliaced/slef table, error Apply(tables []*filter.Table, statement string, stmt ast.StmtNode) ([]string, string, string, error) - // InOnlineDDL returns true if an online ddl is unresolved - InOnlineDDL(schema, table string) bool // Finish would delete online ddl from memory and storage Finish(schema, table string) error // TableType returns ghhost/real table TableType(table string) TableType // RealName returns real table name that removed ghost 
suffix and handled by table router RealName(schema, table string) (string, string) - // GhostName returns ghost table name of a table - GhostName(schema, table string) (string, string) - // SchemaName returns scheme name (gh-ost/pt) - SchemeName() string // Clear clears all online information Clear() error // Close closes online ddl plugin diff --git a/syncer/pt_osc.go b/syncer/pt_osc.go index ab88d3a5e0..8f64cc06bd 100644 --- a/syncer/pt_osc.go +++ b/syncer/pt_osc.go @@ -14,7 +14,6 @@ package syncer import ( - "fmt" "strings" "github.com/pingcap/errors" @@ -127,16 +126,6 @@ func (p *PT) Apply(tables []*filter.Table, statement string, stmt ast.StmtNode) return nil, schema, table, nil } -// InOnlineDDL implements interface -func (p *PT) InOnlineDDL(schema, table string) bool { - if p == nil { - return false - } - - ghostInfo := p.storge.Get(schema, table) - return ghostInfo != nil -} - // Finish implements interface func (p *PT) Finish(schema, table string) error { if p == nil { @@ -173,26 +162,6 @@ func (p *PT) RealName(schema, table string) (string, string) { return schema, table } -// GhostName implements interface -func (p *PT) GhostName(schema, table string) (string, string) { - tp := p.TableType(table) - if tp == ghostTable { - return schema, table - } - - if tp == trashTable { - table = strings.TrimLeft(table, "_") - table = table[:len(table)-4] - } - - return schema, fmt.Sprintf("_%s_new", table) -} - -// SchemeName implements interface -func (p *PT) SchemeName() string { - return config.PT -} - // Clear clears online ddl information func (p *PT) Clear() error { return errors.Trace(p.storge.Clear()) diff --git a/tests/README.md b/tests/README.md index 97e5755a22..14dc30c0bc 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,13 +1,15 @@ ## Preparations -1. 
The following five executables must be copied or generated or linked into these locations, `mydumper` and `sync_diff_inspector` can be downloaded from [tidb-enterprise-tools-latest-linux-amd64](http://download.pingcap.org/tidb-enterprise-tools-latest-linux-amd64.tar.gz): +1. The following executables must be copied or generated or linked into these locations, `mydumper` and `sync_diff_inspector` can be downloaded from [tidb-enterprise-tools-latest-linux-amd64](http://download.pingcap.org/tidb-enterprise-tools-latest-linux-amd64.tar.gz): * `bin/tidb-server` * `bin/sync_diff_inspector` * `bin/mydumper` * `bin/dm-master.test` # generated by `make dm_integration_test_build` * `bin/dm-worker.test` # generated by `make dm_integration_test_build` + * [gh-ost](https://github.com/github/gh-ost) # must be added to path, or you can `export GHOST_BINARY=/path/to/gh-ost-binary` + * [pt-online-schema-change](https://www.percona.com/doc/percona-toolkit/LATEST/pt-online-schema-change.html) # must be added to path, or you can `export PTOSC_BINARY=/path/to/pt-osc-binary` 2. The following programs must be installed: @@ -33,7 +35,9 @@ 1. Check that all required executables exist. 2. Execute `tests/run.sh` - > If want to run one integration test case only, just pass the CASE parameter, such as `make integration_test CASE=sharding` + > If want to run one integration test case only, just pass the CASE parameter, such as `make integration_test CASE=sharding`. + + > The online DDL test using pt-osc doesn't work if the upstream MySQL has different connect port and bind port (often caused by port forwarding via NAT). In this case, you must specify the real IP and port of MySQL. Otherwise you can skip online DDL test by `export ONLINE_DDL_ENABLE=false`. 4. After executing the tests, run `make coverage` to get a coverage report at `/tmp/dm_test/all_cov.html`. 
diff --git a/tests/_utils/run_sql_file b/tests/_utils/run_sql_file index c2b5344303..2ede445bd3 100755 --- a/tests/_utils/run_sql_file +++ b/tests/_utils/run_sql_file @@ -1,8 +1,9 @@ #!/bin/sh # parameter 1: sql file -# parameter 2: port +# parameter 2: host +# parameter 3: port set -eu echo "[$(date)] Executing SQL: $1" > "$TEST_DIR/sql_res.$TEST_NAME.txt" -mysql -uroot -h127.0.0.1 -P$2 --default-character-set utf8 -vv < "$1" >> "$TEST_DIR/sql_res.$TEST_NAME.txt" +mysql -uroot -h$2 -P$3 --default-character-set utf8 -vv < "$1" >> "$TEST_DIR/sql_res.$TEST_NAME.txt" diff --git a/tests/_utils/run_sql_file_online_ddl b/tests/_utils/run_sql_file_online_ddl new file mode 100755 index 0000000000..f4de7bba8f --- /dev/null +++ b/tests/_utils/run_sql_file_online_ddl @@ -0,0 +1,49 @@ +#!/bin/bash +# parameter 1: sql file +# parameter 2: host +# parameter 3: port +# parameter 4: db +# parameter 5: online ddl tool, pt or gh-ost + +set -eu + +sql_file=$1 +host=$2 +port=$3 +schema=$4 +ghost_bin=${GHOST_BINARY:-gh-ost} +ptosc_bin=${PTOSC_BINARY:-pt-online-schema-change} + +echo "[$(date)] Executing SQL: $sql_file" > "$TEST_DIR/sql_res.$TEST_NAME.txt" + +# we use lower case `alter table` in test sql; if you want case-insensitive matching, +# just set `shopt -s nocasematch` +ddl_regex="^alter table.*" +while IFS= read -r line +do + if [[ "$line" =~ $ddl_regex ]]; then + table=$(echo $line | cut -d " " -f3) + alter=$(echo $line | cut -d " " -f4-) + # gh-ost checks whether the connection port equals `select @@global.port`. + # if we have test MySQL in a container with port mapping, these two ports + # may differ. So we cheat gh-ost into believing we are running on aliyun rds, + # which disables the port check.
+ if [ "$5" == "gh-ost" ]; then + $ghost_bin --user=root --host=$host --port=$port \ + --database=$schema --table=$table --alter="$alter" \ + --serve-socket-file="$TEST_DIR/gh-ost.$schema.$table.$port.sock" \ + --allow-on-master --allow-master-master --initially-drop-ghost-table \ + --initially-drop-old-table -ok-to-drop-table -aliyun-rds -execute \ + >> $TEST_DIR/gh-ost.log + elif [ "$5" == "pt" ]; then + $ptosc_bin --user=root --host=$host --port=$port \ + --alter="$alter" D=$schema,t=$table \ + --recursion-method=none --print --execute \ + >> $TEST_DIR/pt-osc.log + else + mysql -uroot -h$host -P$port --default-character-set utf8 -E -e "use $schema; $line" >> "$TEST_DIR/sql_res.$TEST_NAME.txt" + fi + else + mysql -uroot -h$host -P$port --default-character-set utf8 -E -e "use $schema; $line" >> "$TEST_DIR/sql_res.$TEST_NAME.txt" + fi +done <"$sql_file" diff --git a/tests/_utils/test_prepare b/tests/_utils/test_prepare new file mode 100644 index 0000000000..335fac7208 --- /dev/null +++ b/tests/_utils/test_prepare @@ -0,0 +1,23 @@ +MYSQL_HOST1=${MYSQL_HOST1:-127.0.0.1} +MYSQL_HOST2=${MYSQL_HOST2:-127.0.0.1} +MYSQL_PORT1=${MYSQL_PORT1:-3306} +MYSQL_PORT2=${MYSQL_PORT2:-3307} +TIDB_PORT=4000 +MASTER_PORT=8261 +WORKER1_PORT=8262 +WORKER2_PORT=8263 + +# we do clean stuff at the beginning of each run, so we can keep logs of the latest run +function cleanup1() { + rm -rf $WORK_DIR + mkdir $WORK_DIR + for target_db in "$@"; do + run_sql "drop database if exists ${target_db}" $TIDB_PORT + done + run_sql "drop database if exists dm_meta" $TIDB_PORT +} + +function cleanup2() { + pkill -hup dm-worker.test 2>/dev/null || true + pkill -hup dm-master.test 2>/dev/null || true +} diff --git a/tests/all_mode/run.sh b/tests/all_mode/run.sh index 0a230608ac..1d5f85dbe0 100755 --- a/tests/all_mode/run.sh +++ b/tests/all_mode/run.sh @@ -3,32 +3,13 @@ set -eu cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -PWD=$(pwd) -DB1_PORT=3306 -DB2_PORT=3307 -TIDB_PORT=4000 -MASTER_PORT=8261
-WORKER1_PORT=8262 -WORKER2_PORT=8263 -WORK_DIR=$TEST_DIR/all_mode - -# we do clean staff at beginning of each run, so we can keep logs of the latset run -function cleanup1() { - rm -rf $WORK_DIR - mkdir $WORK_DIR - run_sql "drop database if exists all_mode" $TIDB_PORT - run_sql "drop database if exists dm_meta" $TIDB_PORT -} - -function cleanup2() { - pkill -hup dm-worker.test 2>/dev/null || true - pkill -hup dm-master.test 2>/dev/null || true -} +source $cur/../_utils/test_prepare +WORK_DIR=$TEST_DIR/$TEST_NAME function run() { - run_sql_file $cur/data/db1.prepare.sql $DB1_PORT + run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 check_contains 'Query OK, 2 rows affected' - run_sql_file $cur/data/db2.prepare.sql $DB2_PORT + run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 check_contains 'Query OK, 3 rows affected' run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml @@ -46,14 +27,14 @@ function run() { # use sync_diff_inspector to check full dump loader check_sync_diff $WORK_DIR $cur/conf/diff_config.toml - run_sql_file $cur/data/db1.increment.sql $DB1_PORT - run_sql_file $cur/data/db2.increment.sql $DB2_PORT + run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 + run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 # use sync_diff_inspector to check data now! check_sync_diff $WORK_DIR $cur/conf/diff_config.toml } -cleanup1 $* +cleanup1 all_mode # also cleanup dm processes in case of last run failed cleanup2 $* run $* diff --git a/tests/online_ddl/conf/diff_config.toml b/tests/online_ddl/conf/diff_config.toml new file mode 100644 index 0000000000..293fca2678 --- /dev/null +++ b/tests/online_ddl/conf/diff_config.toml @@ -0,0 +1,57 @@ +# diff Configuration. + +log-level = "info" + +chunk-size = 1000 + +check-thread-count = 4 + +sample-percent = 100 + +use-rowid = false + +use-checksum = true + +fix-sql-file = "fix.sql" + +# tables need to check. 
+[[check-tables]] +schema = "online_ddl" +tables = ["t_target"] + +[[table-config]] +schema = "online_ddl" +table = "t_target" +ignore-columns = ["id"] +is-online_ddl = true +index-field = "uid" + +[[table-config.source-tables]] +instance-id = "source-1" +schema = "online_ddl" +table = "~t.*" + +[[table-config.source-tables]] +instance-id = "source-2" +schema = "online_ddl" +table = "~t.*" + +[[source-db]] +host = "127.0.0.1" +port = 3306 +user = "root" +password = "" +instance-id = "source-1" + +[[source-db]] +host = "127.0.0.1" +port = 3307 +user = "root" +password = "" +instance-id = "source-2" + +[target-db] +host = "127.0.0.1" +port = 4000 +user = "root" +password = "" diff --git a/tests/online_ddl/conf/dm-master.toml b/tests/online_ddl/conf/dm-master.toml new file mode 100644 index 0000000000..334e0de993 --- /dev/null +++ b/tests/online_ddl/conf/dm-master.toml @@ -0,0 +1,9 @@ +# Master Configuration. + +[[deploy]] +source-id = "mysql-replica-01" +dm-worker = "127.0.0.1:8262" + +[[deploy]] +source-id = "mysql-replica-02" +dm-worker = "127.0.0.1:8263" diff --git a/tests/online_ddl/conf/dm-task.yaml b/tests/online_ddl/conf/dm-task.yaml new file mode 100644 index 0000000000..7c0c09547e --- /dev/null +++ b/tests/online_ddl/conf/dm-task.yaml @@ -0,0 +1,91 @@ +--- +name: test +task-mode: all +is-sharding: true +meta-schema: "dm_meta" +remove-meta: true +disable-heartbeat: true +timezone: "Asia/Shanghai" +online-ddl-scheme: online-ddl-scheme-placeholder + +target-database: + host: "127.0.0.1" + port: 4000 + user: "root" + password: "" + +mysql-instances: + - source-id: "mysql-replica-01" + server-id: 101 + black-white-list: "instance" + route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] + column-mapping-rules: ["instance-1"] + mydumper-config-name: "global" + loader-config-name: "global" + syncer-config-name: "global" + + - source-id: "mysql-replica-02" + server-id: 102 + meta: + binlog-name: binlog.000001 + binlog-pos: 4 + black-white-list: 
"instance" + route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] + column-mapping-rules: ["instance-2"] + mydumper-config-name: "global" + loader-config-name: "global" + syncer-config-name: "global" + +black-white-list: + instance: + do-dbs: ["online_ddl"] + do-tables: + - db-name: "online_ddl" + tbl-name: "~^t[\\d]+" + +routes: + sharding-route-rules-table: + schema-pattern: online_ddl + table-pattern: t* + target-schema: online_ddl + target-table: t_target + + sharding-route-rules-schema: + schema-pattern: online_ddl + target-schema: online_ddl + +column-mappings: + instance-1: + schema-pattern: "online_ddl" + table-pattern: "t*" + expression: "partition id" + source-column: "id" + target-column: "id" + arguments: ["1", "", "t"] + + instance-2: + schema-pattern: "online_ddl" + table-pattern: "t*" + expression: "partition id" + source-column: "id" + target-column: "id" + arguments: ["2", "", "t"] + +mydumpers: + global: + mydumper-path: "./bin/mydumper" + threads: 4 + chunk-filesize: 64 + skip-tz-utc: true + +loaders: + global: + pool-size: 16 + dir: "./dumped_data" + extra-args: "-B online_ddl" + +syncers: + global: + worker-count: 16 + batch: 100 + max-retry: 100 diff --git a/tests/online_ddl/conf/dm-worker1.toml b/tests/online_ddl/conf/dm-worker1.toml new file mode 100644 index 0000000000..62157e6cd2 --- /dev/null +++ b/tests/online_ddl/conf/dm-worker1.toml @@ -0,0 +1,15 @@ +# Worker Configuration. + +server-id = 101 +source-id = "mysql-replica-01" +flavor = "mysql" +meta-file = "relay.meta" +enable-gtid = false +relay-binlog-name = "" +relay-binlog-gtid = "" + +[from] +host = "127.0.0.1" +user = "root" +password = "" +port = 3306 diff --git a/tests/online_ddl/conf/dm-worker2.toml b/tests/online_ddl/conf/dm-worker2.toml new file mode 100644 index 0000000000..7edb4c5415 --- /dev/null +++ b/tests/online_ddl/conf/dm-worker2.toml @@ -0,0 +1,15 @@ +# Worker Configuration. 
+ +server-id = 102 +source-id = "mysql-replica-02" +flavor = "mysql" +meta-file = "relay.meta" +enable-gtid = false +relay-binlog-name = "" +relay-binlog-gtid = "" + +[from] +host = "127.0.0.1" +user = "root" +password = "" +port = 3307 diff --git a/tests/online_ddl/data/db1.increment.sql b/tests/online_ddl/data/db1.increment.sql new file mode 100644 index 0000000000..6c887f4b43 --- /dev/null +++ b/tests/online_ddl/data/db1.increment.sql @@ -0,0 +1,16 @@ +use online_ddl; +insert into t1 (uid, name) values (10003, 'Buenos Aires'); +update t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; +update t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; +alter table t1 add column age int; +alter table t2 add column age int; +insert into t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +insert into t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); +insert into t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); +insert into t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); +alter table t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t2 add key name (name); +alter table t1 add key name (name); +insert into t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); +insert into t2 (uid, name, info) values (20007, 'Buenos Aires', '{"age": 200}'); diff --git a/tests/online_ddl/data/db1.prepare.sql b/tests/online_ddl/data/db1.prepare.sql new file mode 100644 index 0000000000..5b0009bd74 --- /dev/null +++ b/tests/online_ddl/data/db1.prepare.sql @@ -0,0 +1,7 @@ +drop database if exists `online_ddl`; +create database `online_ddl`; +use `online_ddl`; +create table t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4; +create table 
t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4; +insert into t1 (uid, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); +insert into t2 (uid, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); diff --git a/tests/online_ddl/data/db2.increment.sql b/tests/online_ddl/data/db2.increment.sql new file mode 100644 index 0000000000..1a428ea043 --- /dev/null +++ b/tests/online_ddl/data/db2.increment.sql @@ -0,0 +1,12 @@ +use online_ddl; +delete from t3 where name = 'Santa Sofía de la Piedad'; +alter table t2 add column age int; +update t2 set uid = uid + 10000; +alter table t3 add column age int; +update t3 set age = 1; +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +update t3 set age = age + 10; +alter table t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t2 add key name (name); +alter table t3 add key name (name); +update t2 set age = age + 10; diff --git a/tests/online_ddl/data/db2.prepare.sql b/tests/online_ddl/data/db2.prepare.sql new file mode 100644 index 0000000000..0e7fabf08a --- /dev/null +++ b/tests/online_ddl/data/db2.prepare.sql @@ -0,0 +1,7 @@ +drop database if exists `online_ddl`; +create database `online_ddl`; +use `online_ddl`; +create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4; +create table t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4; +insert into t2 (uid, name, info) values (40000, 'Remedios Moscote', '{}'), (40001, 'Amaranta', '{"age": 0}'); +insert into t3 (uid, name, info) values (30001, 'Aureliano José', '{}'), (30002, 'Santa Sofía de la Piedad', '{}'), (30003, '17 Aurelianos', NULL); diff --git a/tests/online_ddl/dmctl.go 
b/tests/online_ddl/dmctl.go new file mode 100644 index 0000000000..7011ed1bd9 --- /dev/null +++ b/tests/online_ddl/dmctl.go @@ -0,0 +1,33 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "os" + + "github.com/pingcap/dm/tests/utils" +) + +func main() { + cli, err := utils.CreateDmCtl("127.0.0.1:8261") + if err != nil { + utils.ExitWithError(err) + } + conf := os.Args[1] + err = utils.StartTask(context.Background(), cli, conf, nil) + if err != nil { + utils.ExitWithError(err) + } +} diff --git a/tests/online_ddl/run.sh b/tests/online_ddl/run.sh new file mode 100755 index 0000000000..d9167e4aab --- /dev/null +++ b/tests/online_ddl/run.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +set -eu + +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/../_utils/test_prepare +ONLINE_DDL_ENABLE=${ONLINE_DDL_ENABLE:-true} +BASE_TEST_NAME=$TEST_NAME + +function real_run() { + online_ddl_scheme=$1 + run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 + check_contains 'Query OK, 2 rows affected' + run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 + check_contains 'Query OK, 3 rows affected' + + run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml + run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml + run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml + + check_port_alive $MASTER_PORT + check_port_alive $WORKER1_PORT + check_port_alive $WORKER2_PORT + + cd $cur && GO111MODULE=on go build 
-o bin/dmctl && cd - + # start DM task only + cp $cur/conf/dm-task.yaml $WORK_DIR/dm-task-${online_ddl_scheme}.yaml + sed -i "s/online-ddl-scheme-placeholder/${online_ddl_scheme}/g" $WORK_DIR/dm-task-${online_ddl_scheme}.yaml + $cur/bin/dmctl "$WORK_DIR/dm-task-${online_ddl_scheme}.yaml" + + # use sync_diff_inspector to check full dump loader + check_sync_diff $WORK_DIR $cur/conf/diff_config.toml + + run_sql_file_online_ddl $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 online_ddl $online_ddl_scheme + run_sql_file_online_ddl $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 online_ddl $online_ddl_scheme + + # use sync_diff_inspector to check data now! + check_sync_diff $WORK_DIR $cur/conf/diff_config.toml +} + +function run() { + online_ddl_scheme=$1 + TEST_NAME=${BASE_TEST_NAME}_$online_ddl_scheme + WORK_DIR=$TEST_DIR/$TEST_NAME + + cleanup1 online_ddl + # also cleanup dm processes in case of last run failed + cleanup2 $* + real_run $* + cleanup2 $* + + wait_process_exit dm-master.test + wait_process_exit dm-worker.test + + echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" +} + +if [ "$ONLINE_DDL_ENABLE" == true ]; then + run gh-ost + run pt +else + echo "[$(date)] <<<<<< skip online ddl test! 
>>>>>>" +fi diff --git a/tests/sharding/run.sh b/tests/sharding/run.sh index 5d52a619a7..7898be3143 100755 --- a/tests/sharding/run.sh +++ b/tests/sharding/run.sh @@ -3,32 +3,13 @@ set -eu cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -PWD=$(pwd) -DB1_PORT=3306 -DB2_PORT=3307 -TIDB_PORT=4000 -MASTER_PORT=8261 -WORKER1_PORT=8262 -WORKER2_PORT=8263 -WORK_DIR=$TEST_DIR/sharding - -# we do clean staff at beginning of each run, so we can keep logs of the latset run -function cleanup1() { - rm -rf $WORK_DIR - mkdir $WORK_DIR - run_sql "drop database if exists db_target" $TIDB_PORT - run_sql "drop database if exists dm_meta" $TIDB_PORT -} - -function cleanup2() { - pkill -hup dm-worker.test 2>/dev/null || true - pkill -hup dm-master.test 2>/dev/null || true -} +source $cur/../_utils/test_prepare +WORK_DIR=$TEST_DIR/$TEST_NAME function run() { - run_sql_file $cur/data/db1.prepare.sql $DB1_PORT + run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 check_contains 'Query OK, 2 rows affected' - run_sql_file $cur/data/db2.prepare.sql $DB2_PORT + run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 check_contains 'Query OK, 3 rows affected' run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml @@ -47,15 +28,15 @@ function run() { # use sync_diff_inspector to check full dump loader check_sync_diff $WORK_DIR $cur/conf/diff_config.toml - run_sql_file $cur/data/db1.increment.sql $DB1_PORT - run_sql_file $cur/data/db2.increment.sql $DB2_PORT + run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 + run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 # TODO: check sharding partition id # use sync_diff_inspector to check data now! check_sync_diff $WORK_DIR $cur/conf/diff_config.toml } -cleanup1 $* +cleanup1 db_target # also cleanup dm processes in case of last run failed cleanup2 $* run $*