integration_test: fix br_incremental_index test not stable (#468) (#658)
* cherry pick #468 to release-4.0

Signed-off-by: ti-srebot <ti-srebot@pingcap.com>
ti-srebot authored Dec 25, 2020
1 parent 804aa90 commit 46a3652
Showing 3 changed files with 1 addition and 34 deletions.
pkg/backup/client.go (0 additions, 29 deletions)
@@ -985,35 +985,6 @@ func CollectChecksums(backupMeta *kvproto.BackupMeta) ([]Checksum, error) {
     return checksums, nil
 }
 
-// FilterSchema filter in-place schemas that doesn't have backup files
-// this is useful during incremental backup, no files in backup means no files to restore
-// so we can skip some DDL in restore to speed up restoration.
-func FilterSchema(backupMeta *kvproto.BackupMeta) error {
-    dbs, err := utils.LoadBackupTables(backupMeta)
-    if err != nil {
-        return errors.Trace(err)
-    }
-    schemas := make([]*kvproto.Schema, 0, len(backupMeta.Schemas))
-    for _, schema := range backupMeta.Schemas {
-        dbInfo := &model.DBInfo{}
-        err := json.Unmarshal(schema.Db, dbInfo)
-        if err != nil {
-            return errors.Trace(err)
-        }
-        tblInfo := &model.TableInfo{}
-        err = json.Unmarshal(schema.Table, tblInfo)
-        if err != nil {
-            return errors.Trace(err)
-        }
-        tbl := dbs[dbInfo.Name.String()].GetTable(tblInfo.Name.String())
-        if len(tbl.Files) > 0 {
-            schemas = append(schemas, schema)
-        }
-    }
-    backupMeta.Schemas = schemas
-    return nil
-}
-
 // isRetryableError represents whether we should retry reset grpc connection.
 func isRetryableError(err error) bool {
     return status.Code(err) == codes.Unavailable || status.Code(err) == codes.Canceled
pkg/task/backup.go (0 additions, 4 deletions)
@@ -342,10 +342,6 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig
     if isIncrementalBackup {
         // Since we don't support checksum for incremental data, fast checksum should be skipped.
         log.Info("Skip fast checksum in incremental backup")
-        err = backup.FilterSchema(&backupMeta)
-        if err != nil {
-            return errors.Trace(err)
-        }
     } else {
         // When user specified not to calculate checksum, don't calculate checksum.
         log.Info("Skip fast checksum because user requirement.")
tests/br_incremental_index/run.sh (1 addition, 1 deletion)
@@ -31,7 +31,7 @@ done
 
 # full backup
 echo "backup full start..."
-run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" &
+run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)"
 run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
 wait
 # run ddls
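For context, a minimal sketch of the ordering in run.sh before and after this change (an illustration inferred only from the diff above; run_sql and run_br are helpers provided elsewhere by the test suite, and DB, TABLE, PD_ADDR, and TEST_DIR are variables defined earlier in the script):

# Before: the CREATE INDEX runs in the background, so whether the index exists
# when the full backup is taken depends on timing, which can make the later
# incremental backup and restore behave differently from run to run.
run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" &
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
wait

# After: the statement finishes before the backup starts, so every run captures
# the same schema in the full backup; the existing wait from the script is unchanged.
run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)"
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
wait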
