From 6b72ec460d64cba43287857dfb742748b73a4f6c Mon Sep 17 00:00:00 2001
From: Jianjun Liao <36503113+Leavrth@users.noreply.github.com>
Date: Wed, 13 Nov 2024 08:32:11 +0800
Subject: [PATCH] br: fix debug decode backupmeta (#56627)

close pingcap/tidb#56296
---
 br/cmd/br/debug.go            |  18 ++-
 br/pkg/metautil/BUILD.bazel   |   5 +-
 br/pkg/metautil/debug.go      | 134 ++++++++++++++++++
 br/pkg/metautil/debug_test.go | 194 ++++++++++++++++++++++++++
 br/pkg/metautil/metafile.go   |   2 +-
 br/pkg/utils/BUILD.bazel      |   2 +-
 br/pkg/utils/json.go          | 174 +++++++++++++++++++++++
 br/pkg/utils/json_test.go     | 253 +++++++++++++++++++++++++++++++++-
 br/tests/br_debug_meta/run.sh |  29 ++--
 9 files changed, 798 insertions(+), 13 deletions(-)
 create mode 100644 br/pkg/metautil/debug.go
 create mode 100644 br/pkg/metautil/debug_test.go

diff --git a/br/cmd/br/debug.go b/br/cmd/br/debug.go
index 7dd600d025783..a858bfb00dee6 100644
--- a/br/cmd/br/debug.go
+++ b/br/cmd/br/debug.go
@@ -285,6 +285,19 @@ func decodeBackupMetaCommand() *cobra.Command {
 
             fieldName, _ := cmd.Flags().GetString("field")
             if fieldName == "" {
+                if err := metautil.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.FileIndex); err != nil {
+                    return errors.Trace(err)
+                }
+                if err := metautil.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.RawRangeIndex); err != nil {
+                    return errors.Trace(err)
+                }
+                if err := metautil.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.SchemaIndex); err != nil {
+                    return errors.Trace(err)
+                }
+                if err := metautil.DecodeStatsFile(ctx, s, &cfg.CipherInfo, backupMeta.Schemas); err != nil {
+                    return errors.Trace(err)
+                }
+
                 // No field flag, write backupmeta to external storage in JSON format.
                 backupMetaJSON, err := utils.MarshalBackupMeta(backupMeta)
                 if err != nil {
@@ -294,7 +307,7 @@ func decodeBackupMetaCommand() *cobra.Command {
                 if err != nil {
                     return errors.Trace(err)
                 }
-                cmd.Printf("backupmeta decoded at %s\n", path.Join(cfg.Storage, metautil.MetaJSONFile))
+                cmd.Printf("backupmeta decoded at %s\n", path.Join(s.URI(), metautil.MetaJSONFile))
                 return nil
             }
 
@@ -353,6 +366,9 @@ func encodeBackupMetaCommand() *cobra.Command {
             if err != nil {
                 return errors.Trace(err)
             }
+            if backupMetaJSON.Version == metautil.MetaV2 {
+                return errors.Errorf("encoding backupmeta v2 is unimplemented")
+            }
             backupMeta, err := proto.Marshal(backupMetaJSON)
             if err != nil {
                 return errors.Trace(err)
diff --git a/br/pkg/metautil/BUILD.bazel b/br/pkg/metautil/BUILD.bazel
index a7008e6283859..13e6dc7306a43 100644
--- a/br/pkg/metautil/BUILD.bazel
+++ b/br/pkg/metautil/BUILD.bazel
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 go_library(
     name = "metautil",
     srcs = [
+        "debug.go",
         "load.go",
         "metafile.go",
         "statsfile.go",
@@ -38,6 +39,7 @@ go_test(
     name = "metautil_test",
     timeout = "short",
     srcs = [
+        "debug_test.go",
         "load_test.go",
         "main_test.go",
         "metafile_test.go",
@@ -45,7 +47,7 @@ go_test(
     ],
     embed = [":metautil"],
     flaky = True,
-    shard_count = 9,
+    shard_count = 10,
     deps = [
         "//br/pkg/storage",
         "//br/pkg/utils",
@@ -56,6 +58,7 @@ go_test(
         "//pkg/tablecodec",
         "//pkg/testkit/testsetup",
         "//pkg/util",
+        "@com_github_gogo_protobuf//proto",
         "@com_github_golang_protobuf//proto",
         "@com_github_pingcap_kvproto//pkg/brpb",
         "@com_github_pingcap_kvproto//pkg/encryptionpb",
diff --git a/br/pkg/metautil/debug.go b/br/pkg/metautil/debug.go
new file mode 100644
index 0000000000000..1b1fc35bdd2c4
--- /dev/null
+++ b/br/pkg/metautil/debug.go
@@ -0,0 +1,134 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metautil
+
+import (
+    "bytes"
+    "context"
+    "crypto/sha256"
+    "fmt"
+
+    "github.com/gogo/protobuf/proto"
+    "github.com/pingcap/errors"
+    backuppb "github.com/pingcap/kvproto/pkg/brpb"
+    berrors "github.com/pingcap/tidb/br/pkg/errors"
+    "github.com/pingcap/tidb/br/pkg/storage"
+    "github.com/pingcap/tidb/br/pkg/utils"
+    tidbutil "github.com/pingcap/tidb/pkg/util"
+    "golang.org/x/sync/errgroup"
+)
+
+const (
+    // JSONFileFormat represents the JSON file name format
+    JSONFileFormat = "jsons/%s.json"
+)
+
+// DecodeStatsFile decodes the stats file to JSON format; it is called by br debug.
+func DecodeStatsFile(
+    ctx context.Context,
+    s storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+    schemas []*backuppb.Schema,
+) error {
+    for _, schema := range schemas {
+        for _, statsIndex := range schema.StatsIndex {
+            if len(statsIndex.Name) == 0 {
+                continue
+            }
+            content, err := s.ReadFile(ctx, statsIndex.Name)
+            if err != nil {
+                return errors.Trace(err)
+            }
+            decryptContent, err := utils.Decrypt(content, cipher, statsIndex.CipherIv)
+            if err != nil {
+                return errors.Trace(err)
+            }
+            checksum := sha256.Sum256(decryptContent)
+            if !bytes.Equal(statsIndex.Sha256, checksum[:]) {
+                return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
+                    "checksum mismatch expect %x, got %x", statsIndex.Sha256, checksum[:]))
+            }
+            statsFileBlocks := &backuppb.StatsFile{}
+            if err := proto.Unmarshal(decryptContent, statsFileBlocks); err != nil {
+                return errors.Trace(err)
+            }
+            jsonContent, err := utils.MarshalStatsFile(statsFileBlocks)
+            if err != nil {
+                return errors.Trace(err)
+            }
+            if err := s.WriteFile(ctx, fmt.Sprintf(JSONFileFormat, statsIndex.Name), jsonContent); err != nil {
+                return errors.Trace(err)
+            }
+        }
+    }
+    return nil
+}
+
+// DecodeMetaFile decodes the meta file to JSON format; it is called by br debug.
+func DecodeMetaFile(
+    ctx context.Context,
+    s storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+    metaIndex *backuppb.MetaFile,
+) error {
+    if metaIndex == nil {
+        return nil
+    }
+    eg, ectx := errgroup.WithContext(ctx)
+    workers := tidbutil.NewWorkerPool(8, "download files workers")
+    for _, node := range metaIndex.MetaFiles {
+        workers.ApplyOnErrorGroup(eg, func() error {
+            content, err := s.ReadFile(ectx, node.Name)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            decryptContent, err := utils.Decrypt(content, cipher, node.CipherIv)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            checksum := sha256.Sum256(decryptContent)
+            if !bytes.Equal(node.Sha256, checksum[:]) {
+                return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
+                    "checksum mismatch expect %x, got %x", node.Sha256, checksum[:]))
+            }
+
+            child := &backuppb.MetaFile{}
+            if err = proto.Unmarshal(decryptContent, child); err != nil {
+                return errors.Trace(err)
+            }
+
+            // the max depth of the root metafile is only 1.
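+            // (The v2 layout keeps the index tree exactly two levels deep:
+            // the root backupmeta references leaf meta files, and each leaf
+            // carries only data/schema/range entries, so a leaf that still
+            // points at further meta files is rejected below as corruption.)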
+            // ASSERT: len(child.MetaFiles) == 0
+            if len(child.MetaFiles) > 0 {
+                return errors.Errorf("the metafile has unexpected level: %v", child)
+            }
+
+            jsonContent, err := utils.MarshalMetaFile(child)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            if err := s.WriteFile(ctx, fmt.Sprintf(JSONFileFormat, node.Name), jsonContent); err != nil {
+                return errors.Trace(err)
+            }
+
+            err = DecodeStatsFile(ctx, s, cipher, child.Schemas)
+            return errors.Trace(err)
+        })
+    }
+    return eg.Wait()
+}
diff --git a/br/pkg/metautil/debug_test.go b/br/pkg/metautil/debug_test.go
new file mode 100644
index 0000000000000..3a51449eb4c18
--- /dev/null
+++ b/br/pkg/metautil/debug_test.go
@@ -0,0 +1,194 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metautil_test
+
+import (
+    "context"
+    "crypto/sha256"
+    "fmt"
+    "math/rand"
+    "testing"
+
+    "github.com/gogo/protobuf/proto"
+    backuppb "github.com/pingcap/kvproto/pkg/brpb"
+    "github.com/pingcap/tidb/br/pkg/metautil"
+    "github.com/pingcap/tidb/br/pkg/storage"
+    "github.com/pingcap/tidb/br/pkg/utils"
+    "github.com/stretchr/testify/require"
+)
+
+func flushMetaFile(
+    ctx context.Context,
+    t *testing.T,
+    fname string,
+    metaFile *backuppb.MetaFile,
+    storage storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+) *backuppb.File {
+    content, err := metaFile.Marshal()
+    require.NoError(t, err)
+
+    encryptedContent, iv, err := metautil.Encrypt(content, cipher)
+    require.NoError(t, err)
+
+    err = storage.WriteFile(ctx, fname, encryptedContent)
+    require.NoError(t, err)
+
+    checksum := sha256.Sum256(content)
+    file := &backuppb.File{
+        Name:     fname,
+        Sha256:   checksum[:],
+        Size_:    uint64(len(content)),
+        CipherIv: iv,
+    }
+
+    return file
+}
+
+func flushStatsFile(
+    ctx context.Context,
+    t *testing.T,
+    fname string,
+    statsFile *backuppb.StatsFile,
+    storage storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+) *backuppb.StatsFileIndex {
+    content, err := proto.Marshal(statsFile)
+    require.NoError(t, err)
+
+    checksum := sha256.Sum256(content)
+    sizeOri := uint64(len(content))
+    encryptedContent, iv, err := metautil.Encrypt(content, cipher)
+    require.NoError(t, err)
+
+    err = storage.WriteFile(ctx, fname, encryptedContent)
+    require.NoError(t, err)
+
+    return &backuppb.StatsFileIndex{
+        Name:       fname,
+        Sha256:     checksum[:],
+        SizeEnc:    uint64(len(encryptedContent)),
+        SizeOri:    sizeOri,
+        CipherIv:   iv,
+        InlineData: []byte(fmt.Sprintf("%d", rand.Int())),
+    }
+}
+
+func TestDecodeMetaFile(t *testing.T) {
+    ctx := context.Background()
+    base := t.TempDir()
+    s, err := storage.NewLocalStorage(base)
+    require.NoError(t, err)
+    cipher := &backuppb.CipherInfo{CipherType: 1}
+    file1 := flushMetaFile(ctx, t, "data", &backuppb.MetaFile{
+        DataFiles: []*backuppb.File{
+            {
+                Name:       "1.sst",
+                Sha256:     []byte("1.sst"),
+                StartKey:   []byte("start"),
+                EndKey:     []byte("end"),
+                EndVersion: 1,
+                Crc64Xor:   1,
+                TotalKvs:   2,
+                TotalBytes: 3,
+                Cf:         "write",
+                CipherIv:   []byte("1.sst"),
+            },
+        },
+    }, s, cipher)
+    stats := flushStatsFile(ctx, t, "stats", &backuppb.StatsFile{Blocks: []*backuppb.StatsBlock{
+        {
+            PhysicalId: 1,
+            JsonTable:  []byte("1"),
+        },
+        {
+            PhysicalId: 2,
+            JsonTable:  []byte("2"),
+        },
+    }}, s, cipher)
+    metaFile2 := &backuppb.MetaFile{
+        Schemas: []*backuppb.Schema{
+            {
+                Db:              []byte(`{"db_name":{"L":"test","O":"test"},"id":1,"state":5}`),
+                Table:           []byte(`{"id":2,"state":5}`),
+                Crc64Xor:        1,
+                TotalKvs:        2,
+                TotalBytes:      3,
+                TiflashReplicas: 4,
+                Stats:           []byte(`{"a":1}`),
+                StatsIndex:      []*backuppb.StatsFileIndex{stats},
+            },
+        },
+    }
+    file2 := flushMetaFile(ctx, t, "schema", metaFile2, s, cipher)
+
+    {
+        err = metautil.DecodeMetaFile(ctx, s, cipher, &backuppb.MetaFile{MetaFiles: []*backuppb.File{file1}})
+        require.NoError(t, err)
+        content, err := s.ReadFile(ctx, "jsons/data.json")
+        require.NoError(t, err)
+        metaFile, err := utils.UnmarshalMetaFile(content)
+        require.NoError(t, err)
+        require.Equal(t, 1, len(metaFile.DataFiles))
+        require.Equal(t, "1.sst", metaFile.DataFiles[0].Name)
+        require.Equal(t, []byte("1.sst"), metaFile.DataFiles[0].Sha256)
+        require.Equal(t, []byte("start"), metaFile.DataFiles[0].StartKey)
+        require.Equal(t, []byte("end"), metaFile.DataFiles[0].EndKey)
+        require.Equal(t, uint64(1), metaFile.DataFiles[0].EndVersion)
+        require.Equal(t, uint64(1), metaFile.DataFiles[0].Crc64Xor)
+        require.Equal(t, uint64(2), metaFile.DataFiles[0].TotalKvs)
+        require.Equal(t, uint64(3), metaFile.DataFiles[0].TotalBytes)
+        require.Equal(t, "write", metaFile.DataFiles[0].Cf)
+        require.Equal(t, []byte("1.sst"), metaFile.DataFiles[0].CipherIv)
+    }
+
+    {
+        err = metautil.DecodeMetaFile(ctx, s, cipher, &backuppb.MetaFile{MetaFiles: []*backuppb.File{file2}})
+        require.NoError(t, err)
+        {
+            content, err := s.ReadFile(ctx, "jsons/schema.json")
+            require.NoError(t, err)
+            metaFile, err := utils.UnmarshalMetaFile(content)
+            require.NoError(t, err)
+            require.Equal(t, 1, len(metaFile.Schemas))
+            require.Equal(t, metaFile2.Schemas[0].Db, metaFile.Schemas[0].Db)
+            require.Equal(t, metaFile2.Schemas[0].Table, metaFile.Schemas[0].Table)
+            require.Equal(t, uint64(1), metaFile.Schemas[0].Crc64Xor)
+            require.Equal(t, uint64(2), metaFile.Schemas[0].TotalKvs)
+            require.Equal(t, uint64(3), metaFile.Schemas[0].TotalBytes)
+            require.Equal(t, uint32(4), metaFile.Schemas[0].TiflashReplicas)
+            require.Equal(t, metaFile2.Schemas[0].Stats, metaFile.Schemas[0].Stats)
+            statsIndex := metaFile.Schemas[0].StatsIndex
+            require.Equal(t, 1, len(statsIndex))
+            require.Equal(t, stats.Name, statsIndex[0].Name)
+            require.Equal(t, stats.Sha256, statsIndex[0].Sha256)
+            require.Equal(t, stats.SizeEnc, statsIndex[0].SizeEnc)
+            require.Equal(t, stats.SizeOri, statsIndex[0].SizeOri)
+            require.Equal(t, stats.CipherIv, statsIndex[0].CipherIv)
+            require.Equal(t, stats.InlineData, statsIndex[0].InlineData)
+        }
+        {
+            content, err := s.ReadFile(ctx, "jsons/stats.json")
+            require.NoError(t, err)
+            statsFileBlocks, err := utils.UnmarshalStatsFile(content)
+            require.NoError(t, err)
+            require.Equal(t, 2, len(statsFileBlocks.Blocks))
+            require.Equal(t, int64(1), statsFileBlocks.Blocks[0].PhysicalId)
+            require.Equal(t, []byte("1"), statsFileBlocks.Blocks[0].JsonTable)
+            require.Equal(t, int64(2), statsFileBlocks.Blocks[1].PhysicalId)
+            require.Equal(t, []byte("2"), statsFileBlocks.Blocks[1].JsonTable)
+        }
+    }
+}
diff --git a/br/pkg/metautil/metafile.go b/br/pkg/metautil/metafile.go
index 7b146f380243b..c557ee063debb 100644
--- a/br/pkg/metautil/metafile.go
+++ b/br/pkg/metautil/metafile.go
@@ -39,7 +39,7 @@ const (
     // MetaFile represents file name
     MetaFile = "backupmeta"
     // MetaJSONFile represents backup meta json file name
-    MetaJSONFile = "backupmeta.json"
+    MetaJSONFile = "jsons/backupmeta.json"
 
     // MaxBatchSize represents the internal channel buffer size of MetaWriter and MetaReader.
     MaxBatchSize = 1024
diff --git a/br/pkg/utils/BUILD.bazel b/br/pkg/utils/BUILD.bazel
index fa18a8317b234..0c1de4964dd96 100644
--- a/br/pkg/utils/BUILD.bazel
+++ b/br/pkg/utils/BUILD.bazel
@@ -85,7 +85,7 @@ go_test(
     ],
     embed = [":utils"],
     flaky = True,
-    shard_count = 32,
+    shard_count = 34,
     deps = [
         "//br/pkg/errors",
         "//pkg/kv",
diff --git a/br/pkg/utils/json.go b/br/pkg/utils/json.go
index 9f8462dd3a4d9..5f77cc0bf4596 100644
--- a/br/pkg/utils/json.go
+++ b/br/pkg/utils/json.go
@@ -28,6 +28,38 @@ func UnmarshalBackupMeta(data []byte) (*backuppb.BackupMeta, error) {
     return fromJSONBackupMeta(jMeta)
 }
 
+func MarshalMetaFile(meta *backuppb.MetaFile) ([]byte, error) {
+    result, err := makeJSONMetaFile(meta)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return json.Marshal(result)
+}
+
+func UnmarshalMetaFile(data []byte) (*backuppb.MetaFile, error) {
+    jMeta := &jsonMetaFile{}
+    if err := json.Unmarshal(data, jMeta); err != nil {
+        return nil, errors.Trace(err)
+    }
+    return fromJSONMetaFile(jMeta)
+}
+
+func MarshalStatsFile(meta *backuppb.StatsFile) ([]byte, error) {
+    result, err := makeJSONStatsFile(meta)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return json.Marshal(result)
+}
+
+func UnmarshalStatsFile(data []byte) (*backuppb.StatsFile, error) {
+    jMeta := &jsonStatsFile{}
+    if err := json.Unmarshal(data, jMeta); err != nil {
+        return nil, errors.Trace(err)
+    }
+    return fromJSONStatsFile(jMeta)
+}
+
 type jsonValue any
 
 type jsonFile struct {
@@ -95,6 +127,7 @@ func fromJSONRawRange(rng *jsonRawRange) (*backuppb.RawRange, error) {
 type jsonSchema struct {
     Table jsonValue `json:"table,omitempty"`
     DB    jsonValue `json:"db,omitempty"`
+    Stats jsonValue `json:"stats,omitempty"`
 
     *backuppb.Schema
 }
@@ -109,6 +142,12 @@ func makeJSONSchema(schema *backuppb.Schema) (*jsonSchema, error) {
             return nil, errors.Trace(err)
         }
     }
+
+    if schema.Stats != nil {
+        if err := json.Unmarshal(schema.Stats, &result.Stats); err != nil {
+            return nil, errors.Trace(err)
+        }
+    }
     return result, nil
 }
 
@@ -129,6 +168,12 @@ func fromJSONSchema(jSchema *jsonSchema) (*backuppb.Schema, error) {
             return nil, errors.Trace(err)
         }
     }
+    if jSchema.Stats != nil {
+        schema.Stats, err = json.Marshal(jSchema.Stats)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+    }
     return schema, nil
 }
 
@@ -195,3 +240,132 @@ func fromJSONBackupMeta(jMeta *jsonBackupMeta) (*backuppb.BackupMeta, error) {
     }
     return meta, nil
 }
+
+type jsonMetaFile struct {
+    DataFiles []*jsonFile     `json:"data_files,omitempty"`
+    Schemas   []*jsonSchema   `json:"schemas,omitempty"`
+    RawRanges []*jsonRawRange `json:"raw_ranges,omitempty"`
+    DDLs      []jsonValue     `json:"ddls,omitempty"`
+
+    *backuppb.MetaFile
+}
+
+func makeJSONMetaFile(meta *backuppb.MetaFile) (*jsonMetaFile, error) {
+    result := &jsonMetaFile{
+        MetaFile: meta,
+    }
+    for _, file := range meta.DataFiles {
+        result.DataFiles = append(result.DataFiles, makeJSONFile(file))
+    }
+    for _, rawRange := range meta.RawRanges {
+        result.RawRanges = append(result.RawRanges, makeJSONRawRange(rawRange))
+    }
+    for _, schema := range meta.Schemas {
+        s, err := makeJSONSchema(schema)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        result.Schemas = append(result.Schemas, s)
+    }
+    for _, ddl := range meta.Ddls {
+        var d jsonValue
+        if err := json.Unmarshal(ddl, &d); err != nil {
+            return nil, errors.Trace(err)
+        }
+        result.DDLs = append(result.DDLs, d)
+    }
+    return result, nil
+}
+
+func fromJSONMetaFile(jMeta *jsonMetaFile) (*backuppb.MetaFile, error) {
+    meta := jMeta.MetaFile
+    if meta == nil {
+        meta = &backuppb.MetaFile{}
+    }
+
+    for _, schema := range jMeta.Schemas {
+        s, err := fromJSONSchema(schema)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.Schemas = append(meta.Schemas, s)
+    }
+    for _, file := range jMeta.DataFiles {
+        f, err := fromJSONFile(file)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.DataFiles = append(meta.DataFiles, f)
+    }
+    for _, rawRange := range jMeta.RawRanges {
+        rng, err := fromJSONRawRange(rawRange)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.RawRanges = append(meta.RawRanges, rng)
+    }
+    for _, ddl := range jMeta.DDLs {
+        d, err := json.Marshal(ddl)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.Ddls = append(meta.Ddls, d)
+    }
+    return meta, nil
+}
+
+type jsonStatsBlock struct {
+    JSONTable jsonValue `json:"json_table,omitempty"`
+
+    *backuppb.StatsBlock
+}
+
+func makeJSONStatsBlock(statsBlock *backuppb.StatsBlock) (*jsonStatsBlock, error) {
+    result := &jsonStatsBlock{
+        StatsBlock: statsBlock,
+    }
+    if err := json.Unmarshal(statsBlock.JsonTable, &result.JSONTable); err != nil {
+        return nil, errors.Trace(err)
+    }
+    return result, nil
+}
+
+func fromJSONStatsBlock(jMeta *jsonStatsBlock) (*backuppb.StatsBlock, error) {
+    meta := jMeta.StatsBlock
+
+    var err error
+    meta.JsonTable, err = json.Marshal(jMeta.JSONTable)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return meta, nil
+}
+
+type jsonStatsFile struct {
+    Blocks []*jsonStatsBlock `json:"blocks,omitempty"`
+}
+
+func makeJSONStatsFile(statsFile *backuppb.StatsFile) (*jsonStatsFile, error) {
+    result := &jsonStatsFile{}
+    for _, block := range statsFile.Blocks {
+        b, err := makeJSONStatsBlock(block)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        result.Blocks = append(result.Blocks, b)
+    }
+    return result, nil
+}
+
+func fromJSONStatsFile(jMeta *jsonStatsFile) (*backuppb.StatsFile, error) {
+    meta := &backuppb.StatsFile{}
+
+    for _, block := range jMeta.Blocks {
+        b, err := fromJSONStatsBlock(block)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.Blocks = append(meta.Blocks, b)
+    }
+    return meta, nil
+}
diff --git a/br/pkg/utils/json_test.go b/br/pkg/utils/json_test.go
index 3f03f287d92f1..1387d71697e8c 100644
--- a/br/pkg/utils/json_test.go
+++ b/br/pkg/utils/json_test.go
@@ -244,7 +244,7 @@ var testMetaJSONs = [][]byte{
     }`),
 }
 
-func TestEncodeAndDecode(t *testing.T) {
+func TestEncodeAndDecodeForBackupMeta(t *testing.T) {
     for _, testMetaJSON := range testMetaJSONs {
         meta, err := UnmarshalBackupMeta(testMetaJSON)
         require.NoError(t, err)
@@ -253,3 +253,254 @@
         require.JSONEq(t, string(testMetaJSON), string(metaJSON))
     }
 }
+
+var testMetaFileJSONs = [][]byte{
+    []byte(`{
+  "data_files": [
+    {
+      "sha256": "aa5cefba077644dbb2aa1d7fae2a0f879b56411195ad62d18caaf4ec76fae48f",
+      "start_key": "7480000000000000365f720000000000000000",
+      "end_key": "7480000000000000365f72ffffffffffffffff00",
+      "name": "1_2_29_6e97c3b17c657c4413724f614a619f5b665b990187b159e7d2b92552076144b6_1617351201040_write.sst",
+      "end_version": 423978913229963260,
+      "crc64xor": 8093018294706077000,
+      "total_kvs": 1,
+      "total_bytes": 27,
+      "cf": "write",
+      "size": 1423
+    }
+  ],
+  "schemas": [
+    {
+      "table": {
+        "Lock": null,
+        "ShardRowIDBits": 0,
+        "auto_id_cache": 0,
+        "auto_inc_id": 0,
+        "auto_rand_id": 0,
+        "auto_random_bits": 0,
+        "charset": "utf8mb4",
+        "collate": "utf8mb4_bin",
+        "cols": [
+          {
+            "change_state_info": null,
+            "comment": "",
+            "default": null,
+            "default_bit": null,
+            "default_is_expr": false,
+            "dependences": null,
+            "generated_expr_string": "",
+            "generated_stored": false,
+            "hidden": false,
+            "id": 1,
+            "name": {
+              "L": "pk",
+              "O": "pk"
+            },
+            "offset": 0,
+            "origin_default": null,
+            "origin_default_bit": null,
+            "state": 5,
+            "type": {
+              "charset": "utf8mb4",
+              "collate": "utf8mb4_bin",
+              "decimal": 0,
+              "elems": null,
+              "flag": 4099,
+              "flen": 256,
+              "tp": 15
+            },
+            "version": 2
+          }
+        ],
+        "comment": "",
+        "common_handle_version": 1,
+        "compression": "",
+        "constraint_info": null,
+        "fk_info": null,
+        "id": 54,
+        "index_info": [
+          {
+            "comment": "",
+            "id": 1,
+            "idx_cols": [
+              {
+                "length": -1,
+                "name": {
+                  "L": "pk",
+                  "O": "pk"
+                },
+                "offset": 0
+              }
+            ],
+            "idx_name": {
+              "L": "primary",
+              "O": "PRIMARY"
+            },
+            "index_type": 1,
+            "is_global": false,
+            "is_invisible": false,
+            "is_primary": true,
+            "is_unique": true,
+            "state": 5,
+            "tbl_name": {
+              "L": "",
+              "O": ""
+            }
+          }
+        ],
+        "is_columnar": false,
+        "is_common_handle": true,
+        "max_col_id": 1,
+        "max_cst_id": 0,
+        "max_idx_id": 1,
+        "max_shard_row_id_bits": 0,
+        "name": {
+          "L": "test",
+          "O": "test"
+        },
+        "partition": null,
+        "pk_is_handle": false,
+        "pre_split_regions": 0,
+        "sequence": null,
+        "state": 5,
+        "tiflash_replica": null,
+        "update_timestamp": 423978913176223740,
+        "version": 4,
+        "view": null
+      },
+      "db": {
+        "charset": "utf8mb4",
+        "collate": "utf8mb4_bin",
+        "db_name": {
+          "L": "test",
+          "O": "test"
+        },
+        "id": 1,
+        "state": 5
+      },
+      "crc64xor": 8093018294706077000,
+      "total_kvs": 1,
+      "total_bytes": 27
+    }
+  ],
+  "ddls": ["ddl1","ddl2"],
+  "backup_ranges": [{"start_key":"MTIz"}]
+}
+`),
+    []byte(`{
+  "data_files": [
+    {
+      "sha256": "5759c4c73789d6ecbd771b374d42e72a309245d31911efc8553423303c95f22c",
+      "end_key": "7480000000000000ff0500000000000000f8",
+      "name": "1_4_2_default.sst",
+      "total_kvs": 153,
+      "total_bytes": 824218,
+      "cf": "default",
+      "size": 44931
+    },
+    {
+      "sha256": "87597535ce0edbc9a9ef124777ad1d23388467e60c0409309ad33af505c1ea5b",
+      "start_key": "7480000000000000ff0f00000000000000f8",
+      "end_key": "7480000000000000ff1100000000000000f8",
+      "name": "1_16_8_58be9b5dfa92efb6a7de2127c196e03c5ddc3dd8ff3a9b3e7cd4c4aa7c969747_1617689203876_default.sst",
+      "total_kvs": 1,
+      "total_bytes": 396,
+      "cf": "default",
+      "size": 1350
+    },
+    {
+      "sha256": "97bd1b07f9cc218df089c70d454e23c694113fae63a226ae0433165a9c3d75d9",
+      "start_key": "7480000000000000ff1700000000000000f8",
+      "end_key": "7480000000000000ff1900000000000000f8",
+      "name": "1_24_12_72fa67937dd58d654197abadeb9e633d92ebccc5fd993a8e54819a1bd7f81a8c_1617689203853_default.sst",
+      "total_kvs": 35,
+      "total_bytes": 761167,
+      "cf": "default",
+      "size": 244471
+    },
+    {
+      "sha256": "6dcb6ba2ff11f4e7db349effc98210ba372bebbf2470e6cd600ed5f2294330e7",
+      "start_key": "7480000000000000ff3100000000000000f8",
+      "end_key": "7480000000000000ff3300000000000000f8",
+      "name": "1_50_25_2f1abd76c185ec355039f5b4a64f04637af91f80e6cb05099601ec6b9b1910e8_1617689203867_default.sst",
+      "total_kvs": 22,
+      "total_bytes": 1438283,
+      "cf": "default",
+      "size": 1284851
+    },
+    {
+      "sha256": "ba603af7ecb2e21c8f145d995ae85eea3625480cd8186d4cffb53ab1974d8679",
+      "start_key": "7480000000000000ff385f72ffffffffffffffffff0000000000fb",
+      "name": "1_2_33_07b745c3d5a614ed6cc1cf21723b161fcb3e8e7d537546839afd82a4f392988c_1617689203895_default.sst",
+      "total_kvs": 260000,
+      "total_bytes": 114425025,
+      "cf": "default",
+      "size": 66048845
+    }
+  ],
+  "raw_ranges": [
+    {
+      "cf": "default"
+    }
+  ],
+  "backup_ranges": [{"start_key":"MTIz"}]
+}`),
+    []byte(`{
+  "data_files": [
+    {
+      "sha256": "3ae857ef9b379d498ae913434f1d47c3e90a55f3a4cd9074950bfbd163d5e5fc",
+      "start_key": "7480000000000000115f720000000000000000",
+      "end_key": "7480000000000000115f72ffffffffffffffff00",
+      "name": "1_20_9_36adb8cedcd7af34708edff520499e712e2cfdcb202f5707dc9305a031d55a98_1675066275424_write.sst",
+      "end_version": 439108573623222300,
+      "crc64xor": 16261462091570213000,
+      "total_kvs": 15,
+      "total_bytes": 1679,
+      "cf": "write",
+      "size": 2514,
+      "cipher_iv": "56MTbxA4CaNILpirKnBxUw=="
+    }
+  ],
+  "schemas": [
+    {
+      "db": {
+        "charset": "utf8mb4",
+        "collate": "utf8mb4_bin",
+        "db_name": {
+          "L": "test",
+          "O": "test"
+        },
+        "id": 1,
+        "policy_ref_info": null,
+        "state": 5
+      }
+    }
+  ],
+  "backup_ranges": [{"start_key":"MTIz"}]
+  }`),
+}
+
+func TestEncodeAndDecodeForMetaFile(t *testing.T) {
+    for _, testMetaFileJSON := range testMetaFileJSONs {
+        meta, err := UnmarshalMetaFile(testMetaFileJSON)
+        require.NoError(t, err)
+        metaJSON, err := MarshalMetaFile(meta)
+        require.NoError(t, err)
+        require.JSONEq(t, string(testMetaFileJSON), string(metaJSON))
+    }
+}
+
+var testStatsFileJSONs = [][]byte{
+    []byte(`{"blocks":[{"json_table":{"a":1},"physical_id":123},{"json_table":{"a":2},"physical_id":456}]}`),
+}
+
+func TestEncodeAndDecodeForStatsFile(t *testing.T) {
+    for _, testStatsFileJSON := range testStatsFileJSONs {
+        meta, err := UnmarshalStatsFile(testStatsFileJSON)
+        require.NoError(t, err)
+        statsJSON, err := MarshalStatsFile(meta)
+        require.NoError(t, err)
+        require.JSONEq(t, string(testStatsFileJSON), string(statsJSON))
+    }
+}
diff --git a/br/tests/br_debug_meta/run.sh b/br/tests/br_debug_meta/run.sh
index 9fc05b12cbaf3..4e93a54b1d709 100644
--- a/br/tests/br_debug_meta/run.sh
+++ b/br/tests/br_debug_meta/run.sh
@@ -32,36 +32,49 @@ run_sql "$table_region_sql"
 
 row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
 
-# backup table
+# backup table with backupmetav2
 echo "backup start..."
 run_br --pd $PD_ADDR backup table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB"
 
-run_sql "DROP DATABASE $DB;"
-
 # Test validate decode
 run_br validate decode -s "local://$TEST_DIR/$DB"
 
 # should generate backupmeta.json
-if [ ! -f "$TEST_DIR/$DB/backupmeta.json" ]; then
+if [ ! -f "$TEST_DIR/$DB/jsons/backupmeta.json" ]; then
+    echo "TEST: [$TEST_NAME] decode failed!"
+    exit 1
+fi
+
+# backup table with backupmetav1
+echo "backup start..."
+run_br --pd $PD_ADDR backup table --db $DB --table $TABLE -s "local://$TEST_DIR/${DB}_2" --use-backupmeta-v2=false
+
+
+# Test validate decode
+run_br validate decode -s "local://$TEST_DIR/${DB}_2"
+
+# should generate backupmeta.json
+if [ ! -f "$TEST_DIR/${DB}_2/jsons/backupmeta.json" ]; then
     echo "TEST: [$TEST_NAME] decode failed!"
     exit 1
 fi
 
 # Test validate encode
-run_br validate encode -s "local://$TEST_DIR/$DB"
+run_br validate encode -s "local://$TEST_DIR/${DB}_2"
 
 # should generate backupmeta_from_json
-if [ ! -f "$TEST_DIR/$DB/backupmeta_from_json" ]; then
+if [ ! -f "$TEST_DIR/${DB}_2/backupmeta_from_json" ]; then
     echo "TEST: [$TEST_NAME] encode failed!"
     exit 1
 fi
 
 # replace backupmeta
-mv "$TEST_DIR/$DB/backupmeta_from_json" "$TEST_DIR/$DB/backupmeta"
+mv "$TEST_DIR/${DB}_2/backupmeta_from_json" "$TEST_DIR/${DB}_2/backupmeta"
 
 # restore table
 echo "restore start..."
-run_br --pd $PD_ADDR restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB"
+run_sql "DROP DATABASE $DB;"
+run_br --pd $PD_ADDR restore table --db $DB --table $TABLE -s "local://$TEST_DIR/${DB}_2"
 
 row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
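
For reference, below is a minimal sketch (not part of the patch) of how the new helpers compose when driven directly rather than through `br debug decode`. It assumes an unencrypted backup (PLAINTEXT cipher) already written to a local directory; the /tmp/backup path is illustrative, and the real entry point is the command in br/cmd/br/debug.go above.

package main

import (
    "context"
    "fmt"

    "github.com/gogo/protobuf/proto"
    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/kvproto/pkg/encryptionpb"
    "github.com/pingcap/tidb/br/pkg/metautil"
    "github.com/pingcap/tidb/br/pkg/storage"
    "github.com/pingcap/tidb/br/pkg/utils"
)

func main() {
    ctx := context.Background()
    // Illustrative local backup directory; any ExternalStorage works.
    s, err := storage.NewLocalStorage("/tmp/backup")
    if err != nil {
        panic(err)
    }
    // Read the root backupmeta; in v2 it mostly holds MetaFile indexes.
    raw, err := s.ReadFile(ctx, metautil.MetaFile)
    if err != nil {
        panic(err)
    }
    backupMeta := &backuppb.BackupMeta{}
    if err := proto.Unmarshal(raw, backupMeta); err != nil {
        panic(err)
    }
    cipher := &backuppb.CipherInfo{CipherType: encryptionpb.EncryptionMethod_PLAINTEXT}
    // Expand every index into jsons/<name>.json, as `br debug decode` now
    // does; nil indexes (v1 backups) are skipped inside DecodeMetaFile.
    for _, idx := range []*backuppb.MetaFile{
        backupMeta.FileIndex, backupMeta.SchemaIndex, backupMeta.RawRangeIndex,
    } {
        if err := metautil.DecodeMetaFile(ctx, s, cipher, idx); err != nil {
            panic(err)
        }
    }
    // Inline v1-style schemas can still carry stats file indexes of their own.
    if err := metautil.DecodeStatsFile(ctx, s, cipher, backupMeta.Schemas); err != nil {
        panic(err)
    }
    // The root itself round-trips through the JSON helpers in br/pkg/utils.
    j, err := utils.MarshalBackupMeta(backupMeta)
    if err != nil {
        panic(err)
    }
    fmt.Printf("decoded backupmeta: %d bytes of JSON\n", len(j))
}

Because DecodeMetaFile treats a nil index as a no-op, the same loop handles both v1 backups (everything inlined in the root) and v2 backups (indexes only), which is exactly the gap in `br debug decode` that this patch closes.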