From 3415e095f3e2a70d0b617717ae8d34bc3bcf42e2 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Tue, 7 Feb 2023 17:31:59 +0800
Subject: [PATCH 1/7] fix bug, add integration test

---
 br/pkg/lightning/restore/meta_manager.go      | 14 +++----
 br/pkg/lightning/restore/table_restore.go     |  4 +-
 .../config1.toml                              | 41 +++++++++++++++++++
 .../config2.toml                              | 41 +++++++++++++++++++
 .../data/dup_resolve_detect-schema-create.sql |  1 +
 .../data/dup_resolve_detect.ta-schema.sql     |  6 +++
 .../data/dup_resolve_detect.ta.0.sql          | 20 +++++++++
 .../data/dup_resolve_detect.ta.1.sql          | 16 ++++++++
 .../run.sh                                    | 40 ++++++++++++++++++
 9 files changed, 173 insertions(+), 10 deletions(-)
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/config1.toml
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/config2.toml
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect-schema-create.sql
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.0.sql
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.1.sql
 create mode 100644 br/tests/lightning_duplicate_resolution_incremental/run.sh

diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go
index 2d9875ad56960..155c464a77cca 100644
--- a/br/pkg/lightning/restore/meta_manager.go
+++ b/br/pkg/lightning/restore/meta_manager.go
@@ -370,7 +370,7 @@ func (m *dbTableMetaMgr) UpdateTableStatus(ctx context.Context, status metaStatu
 }
 
 func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checksum *verify.KVChecksum, hasLocalDupes bool) (
-    needChecksum bool, needRemoteDupe bool, baseTotalChecksum *verify.KVChecksum, err error,
+    otherHasDupe bool, needRemoteDupe bool, baseTotalChecksum *verify.KVChecksum, err error,
 ) {
     conn, err := m.session.Conn(ctx)
     if err != nil {
@@ -393,7 +393,7 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
         taskHasDuplicates bool
     )
     newStatus := metaStatusChecksuming
-    needChecksum = true
+    otherHasDupe = false
     needRemoteDupe = true
     err = exec.Transact(ctx, "checksum pre-check", func(ctx context.Context, tx *sql.Tx) error {
         rows, err := tx.QueryContext(
@@ -423,9 +423,7 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
                 return err
             }
 
-            if taskHasDuplicates {
-                needChecksum = false
-            }
+            otherHasDupe = otherHasDupe || taskHasDuplicates
 
             // skip finished meta
             if status >= metaStatusFinished {
@@ -436,7 +434,6 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
             if status >= metaStatusChecksuming {
                 newStatus = status
                 needRemoteDupe = status == metaStatusChecksuming
-                needChecksum = needChecksum && needRemoteDupe
                 return nil
             }
 
@@ -445,7 +442,6 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
 
             if status < metaStatusChecksuming {
                 newStatus = metaStatusChecksumSkipped
-                needChecksum = false
                 needRemoteDupe = false
                 break
             } else if status == metaStatusChecksuming {
@@ -475,12 +471,12 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
         return false, false, nil, err
     }
 
-    if needChecksum {
+    if !otherHasDupe && needRemoteDupe {
         ck := verify.MakeKVChecksum(totalBytes, totalKvs, totalChecksum)
         baseTotalChecksum = &ck
     }
     log.FromContext(ctx).Info("check table checksum", zap.String("table", m.tr.tableName),
-        zap.Bool("checksum", needChecksum), zap.String("new_status", newStatus.String()))
+        zap.Bool("checksum", otherHasDupe), zap.String("new_status", newStatus.String()))
     return
 }
 
diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go
index 311659f6ffa51..e28144f042a8c 100644
--- a/br/pkg/lightning/restore/table_restore.go
+++ b/br/pkg/lightning/restore/table_restore.go
@@ -795,10 +795,12 @@ func (tr *TableRestore) postProcess(
         hasDupe = hasLocalDupe
     }
 
-    needChecksum, needRemoteDupe, baseTotalChecksum, err := metaMgr.CheckAndUpdateLocalChecksum(ctx, &localChecksum, hasDupe)
+    otherHasDupe, needRemoteDupe, baseTotalChecksum, err := metaMgr.CheckAndUpdateLocalChecksum(ctx, &localChecksum, hasDupe)
     if err != nil {
         return false, err
     }
+    needChecksum := !otherHasDupe && needRemoteDupe
+    hasDupe = hasDupe || otherHasDupe
 
     if needRemoteDupe && rc.cfg.TikvImporter.DuplicateResolution != config.DupeResAlgNone {
         opts := &kv.SessionOptions{
diff --git a/br/tests/lightning_duplicate_resolution_incremental/config1.toml b/br/tests/lightning_duplicate_resolution_incremental/config1.toml
new file mode 100644
index 0000000000000..aa8948072b3fc
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/config1.toml
@@ -0,0 +1,41 @@
+[lightning]
+task-info-schema-name = 'lightning_task_info_dupe_resolve_incremental'
+index-concurrency = 10
+table-concurrency = 10
+
+[tikv-importer]
+backend = "local"
+on-duplicate = "replace"
+duplicate-resolution = "remove"
+incremental-import = true
+
+[checkpoint]
+enable = true
+schema = "tidb_lightning_checkpoint_dupe_resolve_incremental"
+driver = "mysql"
+
+[[mydumper.files]]
+pattern = '(?i).*(-schema-trigger|-schema-post)\.sql$'
+type = 'ignore'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)-schema-create\.sql$'
+schema = '$1'
+type = 'schema-schema'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)-schema\.sql$'
+schema = '$1'
+table = '$2'
+type = 'table-schema'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)\.0\.sql$'
+schema = '$1'
+table = '$2'
+key = '0'
+type = 'sql'
+
+[post-restore]
+analyze = false
+checksum = "optional"
diff --git a/br/tests/lightning_duplicate_resolution_incremental/config2.toml b/br/tests/lightning_duplicate_resolution_incremental/config2.toml
new file mode 100644
index 0000000000000..0aaf7a6632ceb
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/config2.toml
@@ -0,0 +1,41 @@
+[lightning]
+task-info-schema-name = 'lightning_task_info_dupe_resolve_incremental'
+index-concurrency = 10
+table-concurrency = 10
+
+[tikv-importer]
+backend = "local"
+on-duplicate = "replace"
+duplicate-resolution = "remove"
+incremental-import = true
+
+[checkpoint]
+enable = true
+schema = "tidb_lightning_checkpoint_dupe_resolve_incremental"
+driver = "mysql"
+
+[[mydumper.files]]
+pattern = '(?i).*(-schema-trigger|-schema-post)\.sql$'
+type = 'ignore'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)-schema-create\.sql$'
+schema = '$1'
+type = 'schema-schema'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)-schema\.sql$'
+schema = '$1'
+table = '$2'
+type = 'table-schema'
+
+[[mydumper.files]]
+pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)\.1\.sql$'
+schema = '$1'
+table = '$2'
+key = '1'
+type = 'sql'
+
+[post-restore]
+analyze = false
+checksum = "optional"
diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect-schema-create.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect-schema-create.sql
new file mode 100644
index 0000000000000..202de81067861
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect-schema-create.sql
@@ -0,0 +1 @@
+create schema dup_resolve_detect;
diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
new file mode 100644
index 0000000000000..06913acd83f73
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
@@ -0,0 +1,6 @@
+create table ta (
+    id int not null primary key clustered,
+    name varchar(20) not null,
+    size bigint not null,
+    unique key uni_name(name)
+);
diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.0.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.0.sql
new file mode 100644
index 0000000000000..ee29f689e8792
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.0.sql
@@ -0,0 +1,20 @@
+insert into ta values (3, '3c49f3bd', 6194643990092531757);
+insert into ta values (13, '1da87b44', 3724743701402246028);
+insert into ta values (6, '8b080186', 4840750639653607661);
+insert into ta values (1, 'c83c0e6a', 5057094372111243649);
+insert into ta values (12, 'dd73baf5', 2295098755414696158);
+insert into ta values (4, '1cf99fa1', 2520784525406914042);
+insert into ta values (11, 'b238a0e6', 3314555604794199537);
+insert into ta values (10, 'a489c47a', 7706822128523578708);
+insert into ta values (10, '9a54941e', 4969369552499069659);
+insert into ta values (2, 'e7c90179', 1305347797378229715);
+insert into ta values (9, '75e0344a', 500154046394880294);
+insert into ta values (9, 'c3e8fc36', 5880042654284780409);
+insert into ta values (6, 'd6835599', 2703142091339420770);
+insert into ta values (5, 'c4a9c3a3', 6725275961959702206);
+insert into ta values (14, 'eb1ab0dd', 5442878220607642694);
+insert into ta values (7, '78e166f4', 7062852002089313920);
+insert into ta values (8, '20986b65', 5485014514564267319);
+insert into ta values (8, '9bd4d7a9', 9085469020413045798);
+insert into ta values (15, 'd4aa9a8a', 546189610059969690);
+insert into ta values (7, 'a7870c06', 3615729521258364152);
diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.1.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.1.sql
new file mode 100644
index 0000000000000..88b67b051fe6e
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta.1.sql
@@ -0,0 +1,16 @@
+insert into ta values (111, 'bcf4e75f', 3304674741328415661);
+insert into ta values (112, 'c08078e9', 7464585077725645791);
+insert into ta values (113, 'ca05b4b2', 1280363363179468054);
+insert into ta values (114, '8a094c96', 107578474892900608);
+insert into ta values (115, 'f38efac2', 5273601814057696410);
+insert into ta values (116, '5bf0cb56', 7276272767003446282);
+insert into ta values (117, 'c8836b45', 653431702983792793);
+insert into ta values (118, '7470ba67', 5617407618564683998);
+insert into ta values (119, '466e1e95', 6827370124386922419);
+insert into ta values (120, '41df97f3', 2296443172527920942);
+insert into ta values (121, 'bd644f43', 6038622426427289955);
+insert into ta values (122, '96aeb918', 1496857236328804363);
+insert into ta values (123, '232448f7', 1199921720244646472);
+insert into ta values (124, 'd296d6e4', 5705035255191089143);
+insert into ta values (125, '194ec1d8', 6895413645725179445);
+insert into ta values (126, 'a53238ec', 1527836891202149330);
diff --git a/br/tests/lightning_duplicate_resolution_incremental/run.sh b/br/tests/lightning_duplicate_resolution_incremental/run.sh
new file mode 100644
index 0000000000000..6b13f5b368871
--- /dev/null
+++ b/br/tests/lightning_duplicate_resolution_incremental/run.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# Copyright 2022 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eux
+
+check_cluster_version 5 2 0 'duplicate detection' || exit 0
+
+LOG_FILE1="$TEST_DIR/lightning-duplicate-resolution1.log"
+LOG_FILE2="$TEST_DIR/lightning-duplicate-resolution2.log"
+
+# make sure after first lightning finishes importing, the second lightning then starts
+run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted1" \
+    --enable-checkpoint=1 --log-file "$LOG_FILE1" --config "tests/$TEST_NAME/config1.toml"
+run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted2" \
+    --enable-checkpoint=1 --log-file "$LOG_FILE2" --config "tests/$TEST_NAME/config2.toml"
+
+# Ensure table is consistent.
+run_sql 'admin check table dup_resolve_detect.ta'
+
+# Check data correctness
+run_sql 'select count(*), sum(id) from dup_resolve_detect.ta where id < 100'
+check_contains 'count(*): 5'
+check_contains 'sum(id): 80'
+
+run_sql 'select count(*), sum(id) from dup_resolve_detect.ta where id > 100'
+check_contains 'count(*): 16'
+check_contains 'sum(id): 1896'

From c21367737d8c29f3476dbd86446349bb9e27849b Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Tue, 7 Feb 2023 18:04:08 +0800
Subject: [PATCH 2/7] fix

---
 br/pkg/lightning/restore/meta_manager.go               | 4 ++--
 br/pkg/lightning/restore/table_restore.go              | 1 +
 .../lightning_duplicate_resolution_incremental/run.sh  | 9 ++++++---
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go
index 155c464a77cca..ee5fb93ace19e 100644
--- a/br/pkg/lightning/restore/meta_manager.go
+++ b/br/pkg/lightning/restore/meta_manager.go
@@ -87,7 +87,7 @@ type tableMetaMgr interface {
     UpdateTableStatus(ctx context.Context, status metaStatus) error
     UpdateTableBaseChecksum(ctx context.Context, checksum *verify.KVChecksum) error
     CheckAndUpdateLocalChecksum(ctx context.Context, checksum *verify.KVChecksum, hasLocalDupes bool) (
-        needChecksum bool, needRemoteDupe bool, baseTotalChecksum *verify.KVChecksum, err error)
+        otherHasDupe bool, needRemoteDupe bool, baseTotalChecksum *verify.KVChecksum, err error)
     FinishTable(ctx context.Context) error
 }
 
@@ -1069,7 +1069,7 @@ func (m noopTableMetaMgr) UpdateTableBaseChecksum(ctx context.Context, checksum
 }
 
 func (m noopTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checksum *verify.KVChecksum, hasLocalDupes bool) (bool, bool, *verify.KVChecksum, error) {
-    return true, true, &verify.KVChecksum{}, nil
+    return false, true, &verify.KVChecksum{}, nil
 }
 
 func (m noopTableMetaMgr) FinishTable(ctx context.Context) error {
diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go
index e28144f042a8c..8339e7694dc03 100644
--- a/br/pkg/lightning/restore/table_restore.go
+++ b/br/pkg/lightning/restore/table_restore.go
@@ -794,6 +794,7 @@ func (tr *TableRestore) postProcess(
         }
         hasDupe = hasLocalDupe
     }
+    failpoint.Inject("SlowDownCheckDupe", func() {})
 
     otherHasDupe, needRemoteDupe, baseTotalChecksum, err := metaMgr.CheckAndUpdateLocalChecksum(ctx, &localChecksum, hasDupe)
     if err != nil {
diff --git a/br/tests/lightning_duplicate_resolution_incremental/run.sh b/br/tests/lightning_duplicate_resolution_incremental/run.sh
index 6b13f5b368871..8f0e27108b9c6 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/run.sh
+++ b/br/tests/lightning_duplicate_resolution_incremental/run.sh
@@ -21,11 +21,14 @@ check_cluster_version 5 2 0 'duplicate detection' || exit 0
 LOG_FILE1="$TEST_DIR/lightning-duplicate-resolution1.log"
 LOG_FILE2="$TEST_DIR/lightning-duplicate-resolution2.log"
 
-# make sure after first lightning finishes importing, the second lightning then starts
+# let lightning run a bit slower so that no table in the first lightning instance finishes too early
+export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/restore/SlowDownCheckDupe=sleep(5000)"
 run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted1" \
-    --enable-checkpoint=1 --log-file "$LOG_FILE1" --config "tests/$TEST_NAME/config1.toml"
+    --enable-checkpoint=1 --log-file "$LOG_FILE1" --config "tests/$TEST_NAME/config1.toml" &
 run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted2" \
-    --enable-checkpoint=1 --log-file "$LOG_FILE2" --config "tests/$TEST_NAME/config2.toml"
+    --enable-checkpoint=1 --log-file "$LOG_FILE2" --config "tests/$TEST_NAME/config2.toml" &
+
+wait
 
 # Ensure table is consistent.
 run_sql 'admin check table dup_resolve_detect.ta'

From db2cdf9afe1f0f19c6196189bd0372deb6093197 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Wed, 8 Feb 2023 15:58:39 +0800
Subject: [PATCH 3/7] fix integration test

---
 .../lightning_duplicate_resolution_incremental/config1.toml | 2 +-
 .../lightning_duplicate_resolution_incremental/config2.toml | 2 +-
 br/tests/lightning_duplicate_resolution_incremental/run.sh  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/br/tests/lightning_duplicate_resolution_incremental/config1.toml b/br/tests/lightning_duplicate_resolution_incremental/config1.toml
index aa8948072b3fc..a72bc7a3718cb 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/config1.toml
+++ b/br/tests/lightning_duplicate_resolution_incremental/config1.toml
@@ -11,7 +11,7 @@ incremental-import = true
 
 [checkpoint]
 enable = true
-schema = "tidb_lightning_checkpoint_dupe_resolve_incremental"
+schema = "tidb_lightning_checkpoint_dupe_resolve_incremental1"
 driver = "mysql"
 
 [[mydumper.files]]
diff --git a/br/tests/lightning_duplicate_resolution_incremental/config2.toml b/br/tests/lightning_duplicate_resolution_incremental/config2.toml
index 0aaf7a6632ceb..bb29511a9b432 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/config2.toml
+++ b/br/tests/lightning_duplicate_resolution_incremental/config2.toml
@@ -11,7 +11,7 @@ incremental-import = true
 
 [checkpoint]
 enable = true
-schema = "tidb_lightning_checkpoint_dupe_resolve_incremental"
+schema = "tidb_lightning_checkpoint_dupe_resolve_incremental2"
 driver = "mysql"
 
 [[mydumper.files]]
diff --git a/br/tests/lightning_duplicate_resolution_incremental/run.sh b/br/tests/lightning_duplicate_resolution_incremental/run.sh
index 8f0e27108b9c6..feef4da8f609b 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/run.sh
+++ b/br/tests/lightning_duplicate_resolution_incremental/run.sh
@@ -35,7 +35,7 @@ run_sql 'admin check table dup_resolve_detect.ta'
 
 # Check data correctness
 run_sql 'select count(*), sum(id) from dup_resolve_detect.ta where id < 100'
-check_contains 'count(*): 5'
+check_contains 'count(*): 10'
 check_contains 'sum(id): 80'
 
 run_sql 'select count(*), sum(id) from dup_resolve_detect.ta where id > 100'

From b10400ce61f818653438e0c302640156c08557d5 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Wed, 8 Feb 2023 16:11:49 +0800
Subject: [PATCH 4/7] fix

---
 br/pkg/lightning/restore/meta_manager.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go
index ee5fb93ace19e..7f1546d552aa7 100644
--- a/br/pkg/lightning/restore/meta_manager.go
+++ b/br/pkg/lightning/restore/meta_manager.go
@@ -476,7 +476,8 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
         baseTotalChecksum = &ck
     }
     log.FromContext(ctx).Info("check table checksum", zap.String("table", m.tr.tableName),
-        zap.Bool("checksum", otherHasDupe), zap.String("new_status", newStatus.String()))
+        zap.Bool("otherHasDupe", otherHasDupe), zap.Bool("needRemoteDupe", needRemoteDupe),
+        zap.String("new_status", newStatus.String()))
     return
 }
 
From 69234af3b1c2b39ac7855b72c46c65c9386f87d8 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Wed, 8 Feb 2023 16:20:52 +0800
Subject: [PATCH 5/7] fix data

---
 .../data/dup_resolve_detect.ta-schema.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
index 06913acd83f73..0b8caac671776 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
@@ -1,5 +1,5 @@
 create table ta (
-    id int not null primary key clustered,
+    id int not null primary key nonclustered,
     name varchar(20) not null,
     size bigint not null,
     unique key uni_name(name)

From 8aecf0af0ece13acdb6780e1b6fcd15db8495760 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Wed, 8 Feb 2023 16:36:36 +0800
Subject: [PATCH 6/7] fix test

---
 .../data/dup_resolve_detect.ta-schema.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
index 0b8caac671776..fb6cf2d5a7651 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
+++ b/br/tests/lightning_duplicate_resolution_incremental/data/dup_resolve_detect.ta-schema.sql
@@ -1,5 +1,5 @@
 create table ta (
-    id int not null primary key nonclustered,
+    id varchar(11) not null primary key nonclustered, -- use varchar here to make sure _tidb_rowid will be generated
     name varchar(20) not null,
     size bigint not null,
     unique key uni_name(name)

From a4ebdeef63a4dbdb86e2dc84ce22f0a601b7f79a Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Wed, 8 Feb 2023 22:54:53 +0800
Subject: [PATCH 7/7] address comments

---
 br/pkg/lightning/restore/table_restore.go              |  7 ++++++-
 .../run.sh                                             | 21 ++++++++++++++++++-
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go
index 8339e7694dc03..6b1372c0bca9e 100644
--- a/br/pkg/lightning/restore/table_restore.go
+++ b/br/pkg/lightning/restore/table_restore.go
@@ -794,7 +794,12 @@ func (tr *TableRestore) postProcess(
         }
         hasDupe = hasLocalDupe
     }
-    failpoint.Inject("SlowDownCheckDupe", func() {})
+    failpoint.Inject("SlowDownCheckDupe", func(v failpoint.Value) {
+        sec := v.(int)
+        tr.logger.Warn("start to sleep several seconds before checking other dupe",
+            zap.Int("seconds", sec))
+        time.Sleep(time.Duration(sec) * time.Second)
+    })
 
     otherHasDupe, needRemoteDupe, baseTotalChecksum, err := metaMgr.CheckAndUpdateLocalChecksum(ctx, &localChecksum, hasDupe)
     if err != nil {
diff --git a/br/tests/lightning_duplicate_resolution_incremental/run.sh b/br/tests/lightning_duplicate_resolution_incremental/run.sh
index feef4da8f609b..b1bf1e3869d27 100644
--- a/br/tests/lightning_duplicate_resolution_incremental/run.sh
+++ b/br/tests/lightning_duplicate_resolution_incremental/run.sh
@@ -22,14 +22,33 @@
 LOG_FILE1="$TEST_DIR/lightning-duplicate-resolution1.log"
 LOG_FILE2="$TEST_DIR/lightning-duplicate-resolution2.log"
 
 # let lightning run a bit slower so that no table in the first lightning instance finishes too early
-export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/restore/SlowDownCheckDupe=sleep(5000)"
+export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/restore/SlowDownCheckDupe=return(10)"
 run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted1" \
     --enable-checkpoint=1 --log-file "$LOG_FILE1" --config "tests/$TEST_NAME/config1.toml" &
+
+counter=0
+while [ $counter -lt 10 ]; do
+    if grep -Fq "start to sleep several seconds before checking other dupe" "$LOG_FILE1"; then
+        echo "lightning 1 has started waiting before the dupe check"
+        break
+    fi
+    ((counter += 1))
+    echo "waiting for lightning 1 to start"
+    sleep 1
+done
+
+if [ $counter -ge 10 ]; then
+    echo "failed to wait for lightning 1 to start"
+    exit 1
+fi
+
 run_lightning --backend local --sorted-kv-dir "$TEST_DIR/lightning_duplicate_resolution_incremental.sorted2" \
     --enable-checkpoint=1 --log-file "$LOG_FILE2" --config "tests/$TEST_NAME/config2.toml" &
 
 wait
 
+export GO_FAILPOINTS=""
+
 # Ensure table is consistent.
 run_sql 'admin check table dup_resolve_detect.ta'
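
Reviewer note (not part of the patches): the net effect of the series is that CheckAndUpdateLocalChecksum now only reports whether any other parallel Lightning task saw duplicates and whether remote duplicate detection is still needed, and postProcess derives the final decisions from those two flags. Below is a minimal, self-contained Go sketch of that derived logic; the helper name checksumDecision and the standalone program are illustrative assumptions and do not exist in the TiDB code base.

// checksum_decision_sketch.go -- illustrative only, not part of the patch series.
package main

import "fmt"

// checksumDecision mirrors the logic these patches move into postProcess:
// the meta manager reports otherHasDupe (another parallel task saw duplicates)
// and needRemoteDupe (the table meta is still in the checksuming phase), and
// the caller derives whether to run the checksum and whether the table has
// duplicates overall.
func checksumDecision(localHasDupe, otherHasDupe, needRemoteDupe bool) (needChecksum, hasDupe bool) {
    // Checksum only makes sense when no task has seen duplicates and the
    // table meta has not already moved past the checksuming phase.
    needChecksum = !otherHasDupe && needRemoteDupe
    // Duplicates found by any task mark the table as having duplicates.
    hasDupe = localHasDupe || otherHasDupe
    return needChecksum, hasDupe
}

func main() {
    // Another incremental import found duplicates, so this task must skip the
    // checksum even though its own data looked clean locally.
    needChecksum, hasDupe := checksumDecision(false, true, true)
    fmt.Printf("needChecksum=%v hasDupe=%v\n", needChecksum, hasDupe)
}

Running the sketch prints needChecksum=false hasDupe=true, which is the cross-task behavior the new integration test exercises: once either lightning instance reports duplicates, both skip the checksum and rely on duplicate resolution instead.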