path.go
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudstorage

import (
"context"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tiflow/cdc/model"
"github.com/pingcap/tiflow/engine/pkg/clock"
"github.com/pingcap/tiflow/pkg/config"
"github.com/pingcap/tiflow/pkg/errors"
"github.com/pingcap/tiflow/pkg/hash"
"github.com/pingcap/tiflow/pkg/util"
"github.com/tikv/client-go/v2/oracle"
"go.uber.org/zap"
)
const (
// 3 is the length of "CDC", and the file number contains
// at least 6 digits (e.g. CDC000001.csv).
minFileNamePrefixLen = 3 + config.MinFileIndexWidth
defaultIndexFileName = "meta/CDC.index"
// The following constants are used to generate file paths.
schemaFileNameFormat = "schema_%d_%010d.json"
// The database schema is stored in the following path:
// <schema>/meta/schema_{tableVersion}_{checksum}.json
dbSchemaPrefix = "%s/meta/"
// The table schema is stored in the following path:
// <schema>/<table>/meta/schema_{tableVersion}_{checksum}.json
tableSchemaPrefix = "%s/%s/meta/"
)
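
// For illustration only: a table schema with tableVersion 100 and checksum
// 12345 is stored at "<schema>/<table>/meta/schema_100_0000012345.json", and
// with a 6-digit file index width and a ".csv" extension the data files are
// named "CDC000001.csv", "CDC000002.csv", and so on.
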
var schemaRE = regexp.MustCompile(`meta/schema_\d+_\d{10}\.json$`)
// IsSchemaFile checks whether the file is a schema file.
func IsSchemaFile(path string) bool {
return schemaRE.MatchString(path)
}
// mustParseSchemaName parses the table version and checksum from the schema
// file name, and panics if the name is malformed.
func mustParseSchemaName(path string) (uint64, uint32) {
reportErr := func(err error) {
log.Panic("failed to parse schema file name",
zap.String("schemaPath", path),
zap.Any("error", err))
}
// For <schema>/<table>/meta/schema_{tableVersion}_{checksum}.json, the parts
// should be ["<schema>/<table>/meta/schema", "{tableVersion}", "{checksum}.json"].
parts := strings.Split(path, "_")
if len(parts) < 3 {
reportErr(errors.New("invalid path format"))
}
checksum := strings.TrimSuffix(parts[len(parts)-1], ".json")
tableChecksum, err := strconv.ParseUint(checksum, 10, 64)
if err != nil {
reportErr(err)
}
version := parts[len(parts)-2]
tableVersion, err := strconv.ParseUint(version, 10, 64)
if err != nil {
reportErr(err)
}
return tableVersion, uint32(tableChecksum)
}
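
// generateSchemaFilePath generates the schema file path for a database
// (when table is empty) or for a table, following the layouts described by
// dbSchemaPrefix and tableSchemaPrefix above.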
func generateSchemaFilePath(
schema, table string, tableVersion uint64, checksum uint32,
) string {
if schema == "" || tableVersion == 0 {
log.Panic("invalid schema or tableVersion",
zap.String("schema", schema), zap.Uint64("tableVersion", tableVersion))
}
var dir string
if table == "" {
// Generate db schema file path.
dir = fmt.Sprintf(dbSchemaPrefix, schema)
} else {
// Generate table schema file path.
dir = fmt.Sprintf(tableSchemaPrefix, schema, table)
}
name := fmt.Sprintf(schemaFileNameFormat, tableVersion, checksum)
return path.Join(dir, name)
}
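
// generateDataFileName generates a data file name such as "CDC000001.csv":
// the "CDC" prefix, a zero-padded file index of fileIndexWidth digits, and
// the configured file extension.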
func generateDataFileName(index uint64, extension string, fileIndexWidth int) string {
indexFmt := "%0" + strconv.Itoa(fileIndexWidth) + "d"
return fmt.Sprintf("CDC"+indexFmt+"%s", index, extension)
}
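
// indexWithDate records the current file index of a table together with the
// dates of the current and previous data files, so that the index can be
// reset when the date component of the path changes.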
type indexWithDate struct {
index uint64
currDate, prevDate string
}
// VersionedTableName is used to wrap TableNameWithPhysicTableID with a version.
type VersionedTableName struct {
// Because we need to generate different file paths for different
// tables, we need to use the physical table ID instead of the
// logical table ID (especially when the table is a partitioned table).
TableNameWithPhysicTableID model.TableName
// TableInfoVersion is consistent with the version of TableInfo recorded in
// schema storage. It can either be the finished ts of a DDL event,
// or the checkpoint ts when the processor is restarted.
TableInfoVersion uint64
}
// FilePathGenerator is used to generate data file paths and index file paths.
type FilePathGenerator struct {
extension string
config *Config
clock clock.Clock
storage storage.ExternalStorage
fileIndex map[VersionedTableName]*indexWithDate
hasher *hash.PositionInertia
versionMap map[VersionedTableName]uint64
}
// NewFilePathGenerator creates a FilePathGenerator.
func NewFilePathGenerator(
config *Config,
storage storage.ExternalStorage,
extension string,
clock clock.Clock,
) *FilePathGenerator {
return &FilePathGenerator{
config: config,
extension: extension,
storage: storage,
clock: clock,
fileIndex: make(map[VersionedTableName]*indexWithDate),
hasher: hash.NewPositionInertia(),
versionMap: make(map[VersionedTableName]uint64),
}
}
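
// A typical (illustrative) call sequence for a caller that writes data files
// looks roughly like the following; cfg, extStorage, clk, versionedTable and
// tableInfo are placeholders for values owned by the caller:
//
//    g := NewFilePathGenerator(cfg, extStorage, ".csv", clk)
//    if err := g.CheckOrWriteSchema(ctx, versionedTable, tableInfo); err != nil {
//        return err
//    }
//    date := g.GenerateDateStr()
//    indexPath := g.GenerateIndexFilePath(versionedTable, date)
//    dataPath, err := g.GenerateDataFilePath(ctx, versionedTable, date)
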
// CheckOrWriteSchema checks whether the schema file already exists in the
// external storage and writes it if necessary.
func (f *FilePathGenerator) CheckOrWriteSchema(
ctx context.Context,
table VersionedTableName,
tableInfo *model.TableInfo,
) error {
if _, ok := f.versionMap[table]; ok {
return nil
}
var def TableDefinition
def.FromTableInfo(tableInfo, table.TableInfoVersion)
if !def.IsTableSchema() {
// CheckOrWriteSchema only handles table schemas, not database schemas.
log.Panic("invalid table schema", zap.Any("versionedTableName", table),
zap.Any("tableInfo", tableInfo))
}
// Case 1: check directly whether the schema file already exists.
tblSchemaFile, err := def.GenerateSchemaFilePath()
if err != nil {
return err
}
exist, err := f.storage.FileExists(ctx, tblSchemaFile)
if err != nil {
return err
}
if exist {
f.versionMap[table] = table.TableInfoVersion
return nil
}
// walk the table meta path to find the last schema file
_, checksum := mustParseSchemaName(tblSchemaFile)
schemaFileCnt := 0
lastVersion := uint64(0)
subDir := fmt.Sprintf(tableSchemaPrefix, def.Schema, def.Table)
checksumSuffix := fmt.Sprintf("%010d.json", checksum)
err = f.storage.WalkDir(ctx, &storage.WalkOption{
SubDir: subDir, /* use subDir to avoid walking the whole storage */
ObjPrefix: subDir + "schema_",
}, func(path string, _ int64) error {
schemaFileCnt++
if !strings.HasSuffix(path, checksumSuffix) {
return nil
}
version, parsedChecksum := mustParseSchemaName(path)
if parsedChecksum != checksum {
// TODO: parsedChecksum should be ignored, remove this panic
// after the new path protocol is verified.
log.Panic("invalid schema file name",
zap.String("path", path), zap.Any("checksum", checksum))
}
if version > lastVersion {
lastVersion = version
}
return nil
})
if err != nil {
return err
}
// Case 2: the table meta path is not empty.
if schemaFileCnt != 0 && lastVersion != 0 {
f.versionMap[table] = lastVersion
return nil
}
// Case 3: the table meta path is empty, which happens when:
// a. the table existed before the changefeed started. We need to write the schema file to external storage.
// b. the schema file was deleted by the consumer. We also need to write the schema file to external storage.
if schemaFileCnt != 0 && lastVersion == 0 {
log.Warn("no table schema file found in a non-empty meta path",
zap.Any("versionedTableName", table),
zap.Uint32("checksum", checksum))
}
encodedDetail, err := def.MarshalWithQuery()
if err != nil {
return err
}
f.versionMap[table] = table.TableInfoVersion
return f.storage.WriteFile(ctx, tblSchemaFile, encodedDetail)
}
// SetClock is only used in unit tests.
func (f *FilePathGenerator) SetClock(clock clock.Clock) {
f.clock = clock
}
// GenerateDateStr generates a date string based on the current time
// and the date-separator configuration item.
func (f *FilePathGenerator) GenerateDateStr() string {
var dateStr string
currTime := f.clock.Now()
switch f.config.DateSeparator {
case config.DateSeparatorYear.String():
dateStr = currTime.Format("2006")
case config.DateSeparatorMonth.String():
dateStr = currTime.Format("2006-01")
case config.DateSeparatorDay.String():
dateStr = currTime.Format("2006-01-02")
default:
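// Date separator is disabled or unrecognized; no date component is
// appended to the file path.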
}
return dateStr
}
// GenerateIndexFilePath generates a canonical path for the index file.
func (f *FilePathGenerator) GenerateIndexFilePath(tbl VersionedTableName, date string) string {
dir := f.generateDataDirPath(tbl, date)
name := defaultIndexFileName
return path.Join(dir, name)
}
// GenerateDataFilePath generates a canonical path for the data file.
func (f *FilePathGenerator) GenerateDataFilePath(
ctx context.Context, tbl VersionedTableName, date string,
) (string, error) {
dir := f.generateDataDirPath(tbl, date)
name, err := f.generateDataFileName(ctx, tbl, date)
if err != nil {
return "", err
}
return path.Join(dir, name), nil
}
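
// generateDataDirPath generates the directory that data files are written to:
// <schema>/<table>/<tableVersion>[/<partitionID>][/<date>], where the
// partition ID is appended only for partitioned tables when
// f.config.EnablePartitionSeparator is enabled, and the date only when a
// date separator is configured.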
func (f *FilePathGenerator) generateDataDirPath(tbl VersionedTableName, date string) string {
var elems []string
elems = append(elems, tbl.TableNameWithPhysicTableID.Schema)
elems = append(elems, tbl.TableNameWithPhysicTableID.Table)
elems = append(elems, fmt.Sprintf("%d", f.versionMap[tbl]))
if f.config.EnablePartitionSeparator && tbl.TableNameWithPhysicTableID.IsPartition {
elems = append(elems, fmt.Sprintf("%d", tbl.TableNameWithPhysicTableID.TableID))
}
if len(date) != 0 {
elems = append(elems, date)
}
return path.Join(elems...)
}
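
// generateDataFileName increases the file index of the table and returns the
// corresponding data file name. The index starts from the value recorded in
// the index file (if any) and is reset to zero whenever the date component of
// the path changes.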
func (f *FilePathGenerator) generateDataFileName(
ctx context.Context, tbl VersionedTableName, date string,
) (string, error) {
if idx, ok := f.fileIndex[tbl]; !ok {
fileIdx, err := f.getNextFileIdxFromIndexFile(ctx, tbl, date)
if err != nil {
return "", err
}
f.fileIndex[tbl] = &indexWithDate{
prevDate: date,
currDate: date,
index: fileIdx,
}
} else {
idx.currDate = date
}
// if date changed, reset the counter
if f.fileIndex[tbl].prevDate != f.fileIndex[tbl].currDate {
f.fileIndex[tbl].prevDate = f.fileIndex[tbl].currDate
f.fileIndex[tbl].index = 0
}
f.fileIndex[tbl].index++
return generateDataFileName(f.fileIndex[tbl].index, f.extension, f.config.FileIndexWidth), nil
}
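
// getNextFileIdxFromIndexFile reads the table's index file (meta/CDC.index
// under the data directory) to recover the last recorded file index. If the
// recorded data file already exists and is non-empty, that index is returned
// so the next generated file uses the following number; otherwise
// maxFileIdx-1 is returned so the recorded index number is reused.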
func (f *FilePathGenerator) getNextFileIdxFromIndexFile(
ctx context.Context, tbl VersionedTableName, date string,
) (uint64, error) {
indexFile := f.GenerateIndexFilePath(tbl, date)
exist, err := f.storage.FileExists(ctx, indexFile)
if err != nil {
return 0, err
}
if !exist {
return 0, nil
}
data, err := f.storage.ReadFile(ctx, indexFile)
if err != nil {
return 0, err
}
fileName := strings.TrimSuffix(string(data), "\n")
maxFileIdx, err := f.fetchIndexFromFileName(fileName)
if err != nil {
return 0, err
}
lastFilePath := path.Join(
f.generateDataDirPath(tbl, date), // file dir
generateDataFileName(maxFileIdx, f.extension, f.config.FileIndexWidth), // file name
)
var lastFileExists, lastFileIsEmpty bool
lastFileExists, err = f.storage.FileExists(ctx, lastFilePath)
if err != nil {
return 0, err
}
if lastFileExists {
fileReader, err := f.storage.Open(ctx, lastFilePath)
if err != nil {
return 0, err
}
readBytes, err := fileReader.Read(make([]byte, 1))
if err != nil && err != io.EOF {
return 0, err
}
lastFileIsEmpty = readBytes == 0
if err := fileReader.Close(); err != nil {
return 0, err
}
}
var fileIdx uint64
if lastFileExists && !lastFileIsEmpty {
fileIdx = maxFileIdx
} else {
// Reuse the old index number if the last file does not exist or is empty.
fileIdx = maxFileIdx - 1
}
return fileIdx, nil
}
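
// fetchIndexFromFileName parses the numeric file index from a data file name
// such as "CDC000005.csv".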
func (f *FilePathGenerator) fetchIndexFromFileName(fileName string) (uint64, error) {
var fileIdx uint64
var err error
if len(fileName) < minFileNamePrefixLen+len(f.extension) ||
!strings.HasPrefix(fileName, "CDC") ||
!strings.HasSuffix(fileName, f.extension) {
return 0, errors.WrapError(errors.ErrStorageSinkInvalidFileName,
fmt.Errorf("'%s' is an invalid file name", fileName))
}
extIdx := strings.Index(fileName, f.extension)
fileIdxStr := fileName[3:extIdx]
if fileIdx, err = strconv.ParseUint(fileIdxStr, 10, 64); err != nil {
return 0, errors.WrapError(errors.ErrStorageSinkInvalidFileName, err)
}
return fileIdx, nil
}
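
// dateSeparatorDayRegexp matches the day-level date component in a data file
// path. It is compiled lazily in RemoveExpiredFiles.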
var dateSeparatorDayRegexp *regexp.Regexp
// RemoveExpiredFiles removes expired files from external storage.
func RemoveExpiredFiles(
ctx context.Context,
_ model.ChangeFeedID,
storage storage.ExternalStorage,
cfg *Config,
checkpointTs model.Ts,
) (uint64, error) {
if cfg.DateSeparator != config.DateSeparatorDay.String() {
return 0, nil
}
if dateSeparatorDayRegexp == nil {
dateSeparatorDayRegexp = regexp.MustCompile(config.DateSeparatorDay.GetPattern())
}
ttl := time.Duration(cfg.FileExpirationDays) * time.Hour * 24
currTime := oracle.GetTimeFromTS(checkpointTs).Add(-ttl)
expiredDate := currTime.Format("2006-01-02")
cnt := uint64(0)
err := util.RemoveFilesIf(ctx, storage, func(path string) bool {
// the path is like: <schema>/<table>/<tableVersion>/<partitionID>/<date>/CDC{num}.extension
match := dateSeparatorDayRegexp.FindString(path)
if match != "" && match < expiredDate {
cnt++
return true
}
return false
}, nil)
return cnt, err
}
// RemoveEmptyDirs removes empty directories under the target directory on the
// local filesystem.
func RemoveEmptyDirs(
ctx context.Context,
id model.ChangeFeedID,
target string,
) (uint64, error) {
cnt := uint64(0)
err := filepath.Walk(target, func(path string, info fs.FileInfo, err error) error {
if os.IsNotExist(err) || path == target || info == nil {
// If the path does not exist, return nil to continue walking.
return nil
}
if err != nil {
return err
}
if info.IsDir() {
files, err := os.ReadDir(path)
if err == nil && len(files) == 0 {
log.Debug("Deleting empty directory",
zap.String("namespace", id.Namespace),
zap.String("changeFeedID", id.ID),
zap.String("path", path))
os.Remove(path)
cnt++
return filepath.SkipDir
}
}
return nil
})
return cnt, err
}