integration_test: fix br_incremental_index test not stable (#468) #658

Merged
pkg/backup/client.go (0 additions & 29 deletions)
@@ -985,35 +985,6 @@ func CollectChecksums(backupMeta *kvproto.BackupMeta) ([]Checksum, error) {
 	return checksums, nil
 }
 
-// FilterSchema filter in-place schemas that doesn't have backup files
-// this is useful during incremental backup, no files in backup means no files to restore
-// so we can skip some DDL in restore to speed up restoration.
-func FilterSchema(backupMeta *kvproto.BackupMeta) error {
-	dbs, err := utils.LoadBackupTables(backupMeta)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	schemas := make([]*kvproto.Schema, 0, len(backupMeta.Schemas))
-	for _, schema := range backupMeta.Schemas {
-		dbInfo := &model.DBInfo{}
-		err := json.Unmarshal(schema.Db, dbInfo)
-		if err != nil {
-			return errors.Trace(err)
-		}
-		tblInfo := &model.TableInfo{}
-		err = json.Unmarshal(schema.Table, tblInfo)
-		if err != nil {
-			return errors.Trace(err)
-		}
-		tbl := dbs[dbInfo.Name.String()].GetTable(tblInfo.Name.String())
-		if len(tbl.Files) > 0 {
-			schemas = append(schemas, schema)
-		}
-	}
-	backupMeta.Schemas = schemas
-	return nil
-}
-
 // isRetryableError represents whether we should retry reset grpc connection.
 func isRetryableError(err error) bool {
 	return status.Code(err) == codes.Unavailable || status.Code(err) == codes.Canceled
pkg/task/backup.go (0 additions & 4 deletions)
@@ -342,10 +342,6 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig
 	if isIncrementalBackup {
 		// Since we don't support checksum for incremental data, fast checksum should be skipped.
 		log.Info("Skip fast checksum in incremental backup")
-		err = backup.FilterSchema(&backupMeta)
-		if err != nil {
-			return errors.Trace(err)
-		}
 	} else {
 		// When user specified not to calculate checksum, don't calculate checksum.
 		log.Info("Skip fast checksum because user requirement.")
tests/br_incremental_index/run.sh (1 addition & 1 deletion)
@@ -31,7 +31,7 @@ done
 
 # full backup
 echo "backup full start..."
-run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" &
+run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)"
 run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
 wait
 # run ddls
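
For context, a minimal sketch of the ordering problem behind the one-line run.sh change, written with the same harness helpers (run_sql, run_br) and variables (${DB}, ${TABLE}, $PD_ADDR, $TEST_DIR) the test already uses. It is an illustration of the race, not part of the patch, and it assumes the BR integration-test environment is set up.

#!/bin/sh
# Before the fix: the DDL is backgrounded, so the full backup can start while
# "CREATE INDEX idx_c1" is still running. Whether the index ends up covered by
# the full backup or only by the later incremental backup then depends on
# timing, which is presumably what made br_incremental_index flaky.
run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" &
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
wait   # joins the backgrounded DDL only after the backup has already run

# After the fix: the DDL completes before the backup starts, so every run sees
# the same state: the index is in the full backup, and the incremental backup
# only has to cover the DDLs issued afterwards.
run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)"
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
wait   # no background jobs remain, so this is now a no-op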