From 1d194a77b0d1ceafc9e2881016ef27dee25695de Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 20 Feb 2024 19:27:41 +0500 Subject: [PATCH 01/80] WIP backup to S3/AzureStorageBlob --- pkg/backup/backuper.go | 43 ++++++++++++++++++++++++++++++++++++++++++ pkg/backup/create.go | 23 ++++++++++++++++------ pkg/config/config.go | 2 ++ 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 784df2c6..507ab6fe 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "net/url" + "os" "path" "strings" @@ -174,3 +176,44 @@ func (b *Backuper) isDiskTypeEncryptedObject(disk clickhouse.Disk, disks []click } return underlyingIdx >= 0 } + +func (b *Backuper) getEmbeddedBackupDestination(ctx context.Context, backupName string) (string, error) { + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + return fmt.Sprintf("Disk('%s','%s')", b.cfg.ClickHouse.EmbeddedBackupDisk, backupName), nil + } + if b.cfg.General.RemoteStorage == "s3" { + s3Endpoint, err := b.ch.ApplyMacros(ctx, b.buildS3DestinationEndpoint()) + if err != nil { + return "", err + } + if b.cfg.S3.AssumeRoleARN != "" || (b.cfg.S3.AccessKey == "" && os.Getenv("AWS_ACCESS_KEY_ID") != "") { + return fmt.Sprintf("S3('%s/%s')", s3Endpoint, backupName), nil + } + + } + if b.cfg.General.RemoteStorage == "gcs" { + + } + if b.cfg.General.RemoteStorage == "azblob" { + + } + return "", fmt.Errorf("empty clickhouse->embedded_backup_disk and invalid general->remote_storage: %s", b.cfg.General.RemoteStorage) +} + +func (b *Backuper) buildS3DestinationEndpoint() string { + url := url.URL{} + url.Scheme = "https" + if b.cfg.S3.DisableSSL { + url.Scheme = "http" + } + url.Host = b.cfg.S3.Endpoint + if url.Host == "" && b.cfg.S3.Region != "" && !b.cfg.S3.ForcePathStyle { + url.Host = "s3." + b.cfg.S3.Region + ".amazonaws.com" + url.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + } + if url.Host == "" && b.cfg.S3.Bucket != "" && b.cfg.S3.ForcePathStyle { + url.Host = b.cfg.S3.Bucket + "." 
+ "s3.amazonaws.com" + url.Path = b.cfg.S3.ObjectDiskPath + } + return url.String() +} diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 8023bcba..63eb520f 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -302,7 +302,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa if doesShard(b.cfg.General.ShardedOperationMode) { return fmt.Errorf("cannot perform embedded backup: %w", errShardOperationUnsupported) } - if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists { + if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } if createRBAC || createConfigs { @@ -322,7 +322,6 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa tableSizeSQL := "" i := 0 backupMetadataSize := uint64(0) - backupPath := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName) for _, table := range tables { if table.Skip { continue @@ -351,12 +350,23 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa tableSizeSQL += ", " } } - backupSQL := fmt.Sprintf("BACKUP %s TO Disk(?,?)", tablesSQL) + backupResult := make([]clickhouse.SystemBackups, 0) + embeddedBackupDestination, err := b.getEmbeddedBackupDestination(ctx, backupName) + if err != nil { + return err + } + backupSQL := fmt.Sprintf("BACKUP %s TO %s", tablesSQL, embeddedBackupDestination) + backupSettings := make([]string, 0) if schemaOnly { - backupSQL += " SETTINGS structure_only=1, show_table_uuid_in_table_create_query_if_not_nil=1" + backupSettings = append(backupSettings, "structure_only=1", "show_table_uuid_in_table_create_query_if_not_nil=1") } - backupResult := make([]clickhouse.SystemBackups, 0) - if err := b.ch.SelectContext(ctx, &backupResult, backupSQL, b.cfg.ClickHouse.EmbeddedBackupDisk, backupName); err != nil { + if b.cfg.ClickHouse.EmbeddedBackupThreads > 0 { + backupSettings = append(backupSettings, fmt.Sprintf("backup_threads=%d", b.cfg.ClickHouse.EmbeddedBackupThreads)) + } + if len(backupSettings) > 0 { + backupSQL += " SETTINGS " + strings.Join(backupSettings, ", ") + } + if err := b.ch.SelectContext(ctx, &backupResult, backupSQL); err != nil { return fmt.Errorf("backup error: %v", err) } if len(backupResult) != 1 || (backupResult[0].Status != "BACKUP_COMPLETE" && backupResult[0].Status != "BACKUP_CREATED") { @@ -390,6 +400,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } log.Debug("calculate parts list from embedded backup disk") + backupPath := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName) for _, table := range tables { select { case <-ctx.Done(): diff --git a/pkg/config/config.go b/pkg/config/config.go index 5673690c..08c597b3 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -208,6 +208,8 @@ type ClickHouseConfig struct { FreezeByPartWhere string `yaml:"freeze_by_part_where" envconfig:"CLICKHOUSE_FREEZE_BY_PART_WHERE"` UseEmbeddedBackupRestore bool `yaml:"use_embedded_backup_restore" envconfig:"CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE"` EmbeddedBackupDisk string `yaml:"embedded_backup_disk" envconfig:"CLICKHOUSE_EMBEDDED_BACKUP_DISK"` + EmbeddedBackupThreads uint8 `yaml:"embedded_backup_threads" envconfig:"CLICKHOUSE_EMBEDDED_BACKUP_THREADS"` + EmbeddedRestoreThreads uint8 `yaml:"embedded_restore_threads" 
envconfig:"CLICKHOUSE_EMBEDDED_RESTORE_THREADS"` BackupMutations bool `yaml:"backup_mutations" envconfig:"CLICKHOUSE_BACKUP_MUTATIONS"` RestoreAsAttach bool `yaml:"restore_as_attach" envconfig:"CLICKHOUSE_RESTORE_AS_ATTACH"` CheckPartsColumns bool `yaml:"check_parts_columns" envconfig:"CLICKHOUSE_CHECK_PARTS_COLUMNS"` From cf836319f7cb9c6e3207591bc08d2a4378aacae5 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 22 Feb 2024 13:06:32 +0500 Subject: [PATCH 02/80] [WIP] continue to develop incremental backup for embedded backup --- ReadMe.md | 3 + cmd/clickhouse-backup/main.go | 7 +- pkg/backup/backuper.go | 82 +++++++++++++++++-- pkg/backup/create.go | 39 ++++++--- pkg/backup/create_remote.go | 2 +- pkg/backup/restore.go | 17 ++-- pkg/clickhouse/clickhouse.go | 5 +- pkg/config/config.go | 2 + pkg/server/server.go | 6 +- .../config-azblob-embedded-url.yml | 37 +++++++++ test/integration/config-s3-embedded-url.yml | 39 +++++++++ test/integration/integration_test.go | 14 ++-- 12 files changed, 211 insertions(+), 42 deletions(-) create mode 100644 test/integration/config-azblob-embedded-url.yml create mode 100644 test/integration/config-s3-embedded-url.yml diff --git a/ReadMe.md b/ReadMe.md index 94aa7fac..4707caa7 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -529,6 +529,9 @@ gcs: credentials_file: "" # GCS_CREDENTIALS_FILE credentials_json: "" # GCS_CREDENTIALS_JSON credentials_json_encoded: "" # GCS_CREDENTIALS_JSON_ENCODED + # look https://cloud.google.com/storage/docs/authentication/managing-hmackeys#create how to get HMAC keys for access to bucket + embedded_access_key: "" # GCS_EMBEDDED_ACCESS_KEY, use it when `use_embedded_backup_restore: true`, `embedded_backup_disk: ""`, `remote_storage: gcs` + embedded_secret_key: "" # GCS_EMBEDDED_SECRET_KEY, use it when `use_embedded_backup_restore: true`, `embedded_backup_disk: ""`, `remote_storage: gcs` endpoint: "" # GCS_ENDPOINT, use it for custom GCS endpoint/compatible storage. For example, when using custom endpoint via private service connect bucket: "" # GCS_BUCKET path: "" # GCS_PATH, `system.macros` values can be applied as {macro_name} diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 237ccd49..5e8d951c 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -90,7 +90,7 @@ func main() { Description: "Create new backup", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateBackup(c.Args().First(), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateBackup(c.Args().First(), c.String("t"), c.String("embbedded-base-backup"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -98,6 +98,11 @@ func main() { Hidden: false, Usage: "Create backup only matched with table name patterns, separated by comma, allow ? 
and * as wildcard", }, + cli.StringFlag{ + Name: "embbedded-base-backup", + Hidden: false, + Usage: "Create incremental embedded backup based on other backup name", + }, cli.StringSliceFlag{ Name: "partitions", Hidden: false, diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 507ab6fe..d30e7cdd 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -177,30 +177,67 @@ func (b *Backuper) isDiskTypeEncryptedObject(disk clickhouse.Disk, disks []click return underlyingIdx >= 0 } -func (b *Backuper) getEmbeddedBackupDestination(ctx context.Context, backupName string) (string, error) { +func (b *Backuper) getEmbeddedBackupLocation(ctx context.Context, backupName string) (string, error) { if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { return fmt.Sprintf("Disk('%s','%s')", b.cfg.ClickHouse.EmbeddedBackupDisk, backupName), nil } + + if err := b.applyMacrosToObjectDiskPath(ctx); err != nil { + return "", err + } if b.cfg.General.RemoteStorage == "s3" { - s3Endpoint, err := b.ch.ApplyMacros(ctx, b.buildS3DestinationEndpoint()) + s3Endpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationS3()) if err != nil { return "", err } - if b.cfg.S3.AssumeRoleARN != "" || (b.cfg.S3.AccessKey == "" && os.Getenv("AWS_ACCESS_KEY_ID") != "") { - return fmt.Sprintf("S3('%s/%s')", s3Endpoint, backupName), nil + if b.cfg.S3.AccessKey != "" { + return fmt.Sprintf("S3('%s/%s','%s','%s')", s3Endpoint, backupName, b.cfg.S3.AccessKey, b.cfg.S3.SecretKey), nil } - + if os.Getenv("AWS_ACCESS_KEY_ID") != "" { + return fmt.Sprintf("S3('%s/%s','%s','%s')", s3Endpoint, backupName, os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")), nil + } + return "", fmt.Errorf("provide s3->access_key and s3->secret_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`") } if b.cfg.General.RemoteStorage == "gcs" { + gcsEndpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationGCS()) + if err != nil { + return "", err + } + if b.cfg.GCS.EmbeddedAccessKey != "" { + return fmt.Sprintf("S3('%s/%s','%s','%s')", gcsEndpoint, backupName, b.cfg.GCS.EmbeddedAccessKey, b.cfg.GCS.EmbeddedSecretKey), nil + } + if os.Getenv("AWS_ACCESS_KEY_ID") != "" { + return fmt.Sprintf("S3('%s/%s','%s','%s')", gcsEndpoint, backupName, os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")), nil + } + return "", fmt.Errorf("provide gcs->embedded_access_key and gcs->embedded_secret_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`") } if b.cfg.General.RemoteStorage == "azblob" { - + azblobEndpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationAZBLOB()) + if err != nil { + return "", err + } + if b.cfg.AzureBlob.Container != "" { + return fmt.Sprintf("AzureBlobStorage('%s','%s','%s/%s')", azblobEndpoint, b.cfg.AzureBlob.Container, b.cfg.AzureBlob.ObjectDiskPath, backupName), nil + } + return "", fmt.Errorf("provide azblob->container and azblob->account_name, azblob->account_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`") } return "", fmt.Errorf("empty clickhouse->embedded_backup_disk and invalid general->remote_storage: %s", b.cfg.General.RemoteStorage) } -func (b *Backuper) buildS3DestinationEndpoint() string { +func (b *Backuper) applyMacrosToObjectDiskPath(ctx context.Context) error { + var err error + if b.cfg.General.RemoteStorage == "s3" { + b.cfg.S3.ObjectDiskPath, err = b.ch.ApplyMacros(ctx, b.cfg.S3.ObjectDiskPath) + } else if b.cfg.General.RemoteStorage == "gcs" { + b.cfg.GCS.ObjectDiskPath, err 
= b.ch.ApplyMacros(ctx, b.cfg.GCS.ObjectDiskPath) + } else if b.cfg.General.RemoteStorage == "azblob" { + b.cfg.AzureBlob.ObjectDiskPath, err = b.ch.ApplyMacros(ctx, b.cfg.AzureBlob.ObjectDiskPath) + } + return err +} + +func (b *Backuper) buildEmbeddedLocationS3() string { url := url.URL{} url.Scheme = "https" if b.cfg.S3.DisableSSL { @@ -212,8 +249,37 @@ func (b *Backuper) buildS3DestinationEndpoint() string { url.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) } if url.Host == "" && b.cfg.S3.Bucket != "" && b.cfg.S3.ForcePathStyle { - url.Host = b.cfg.S3.Bucket + "." + "s3.amazonaws.com" + url.Host = b.cfg.S3.Bucket + "." + "s3." + b.cfg.S3.Region + ".amazonaws.com" url.Path = b.cfg.S3.ObjectDiskPath } return url.String() } + +func (b *Backuper) buildEmbeddedLocationGCS() string { + url := url.URL{} + url.Scheme = "https" + if b.cfg.GCS.ForceHttp { + url.Scheme = "http" + } + if b.cfg.GCS.Endpoint != "" { + if !strings.HasPrefix(b.cfg.GCS.Endpoint, "http") { + url.Host = b.cfg.GCS.Endpoint + } else { + newUrl, _ := url.Parse(b.cfg.GCS.Endpoint) + url = *newUrl + } + } + if url.Host == "" { + url.Host = "storage.googleapis.com" + } + url.Path = path.Join(b.cfg.GCS.Bucket, b.cfg.GCS.ObjectDiskPath) + return url.String() +} + +func (b *Backuper) buildEmbeddedLocationAZBLOB() string { + url := url.URL{} + url.Scheme = b.cfg.AzureBlob.EndpointSchema + url.Host = b.cfg.AzureBlob.EndpointSuffix + url.Path = b.cfg.AzureBlob.AccountName + return fmt.Sprintf("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s;BlobEndpoint=%s;", b.cfg.AzureBlob.EndpointSchema, b.cfg.AzureBlob.AccountName, b.cfg.AzureBlob.AccountKey, url.String()) +} diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 63eb520f..d1875845 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -56,7 +56,7 @@ func NewBackupName() string { // CreateBackup - create new backup of all tables matched by tablePattern // If backupName is empty string will use default backup name -func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { +func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -121,7 +121,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st partitionsIdMap, partitionsNameList := partition.ConvertPartitionsToIdsMapAndNamesList(ctx, b.ch, tables, nil, partitions) // create if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitionsNameList, partitionsIdMap, schemaOnly, createRBAC, createConfigs, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, log, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, tablePattern, embeddedBaseBackup, partitionsNameList, partitionsIdMap, schemaOnly, createRBAC, createConfigs, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, log, startBackup, version) } else { err = b.createBackupLocal(ctx, backupName, partitionsIdMap, tables, doBackupData, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, version, disks, diskMap, diskTypes, allDatabases, allFunctions, log, startBackup) } @@ -297,7 +297,7 
@@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, schemaOnly, createRBAC, createConfigs bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern, baseBackup string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, schemaOnly, createRBAC, createConfigs bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { // TODO: Implement sharded backup operations for embedded backups if doesShard(b.cfg.General.ShardedOperationMode) { return fmt.Errorf("cannot perform embedded backup: %w", errShardOperationUnsupported) @@ -350,22 +350,25 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa tableSizeSQL += ", " } } - backupResult := make([]clickhouse.SystemBackups, 0) - embeddedBackupDestination, err := b.getEmbeddedBackupDestination(ctx, backupName) + embeddedBackupDestination, err := b.getEmbeddedBackupLocation(ctx, backupName) if err != nil { return err } backupSQL := fmt.Sprintf("BACKUP %s TO %s", tablesSQL, embeddedBackupDestination) - backupSettings := make([]string, 0) + backupSettings := []string{"show_table_uuid_in_table_create_query_if_not_nil=1"} if schemaOnly { - backupSettings = append(backupSettings, "structure_only=1", "show_table_uuid_in_table_create_query_if_not_nil=1") + backupSettings = append(backupSettings, "structure_only=1") } if b.cfg.ClickHouse.EmbeddedBackupThreads > 0 { backupSettings = append(backupSettings, fmt.Sprintf("backup_threads=%d", b.cfg.ClickHouse.EmbeddedBackupThreads)) } + if baseBackup != "" { + backupSettings = append(backupSettings, fmt.Sprintf("base_backup='%s'", baseBackup)) + } if len(backupSettings) > 0 { backupSQL += " SETTINGS " + strings.Join(backupSettings, ", ") } + backupResult := make([]clickhouse.SystemBackups, 0) if err := b.ch.SelectContext(ctx, &backupResult, backupSQL); err != nil { return fmt.Errorf("backup error: %v", err) } @@ -400,7 +403,11 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } log.Debug("calculate parts list from embedded backup disk") - backupPath := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName) + backupPath := b.DefaultDataPath + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + backupPath = diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk] + } + backupPath = path.Join(backupPath, backupName) for _, table := range tables { select { case <-ctx.Done(): @@ -409,7 +416,12 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa if table.Skip { continue } - disksToPartsMap, err := b.getPartsFromBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) + var disksToPartsMap map[string][]metadata.Part + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + disksToPartsMap, err = 
b.getPartsFromLocalEmbeddedBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) + } else { + disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) + } if err != nil { return err } @@ -428,7 +440,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa backupMetadataSize += metadataSize } } - backupMetaFile := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName, "metadata.json") + backupMetaFile := path.Join(backupPath, "metadata.json") if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, log); err != nil { return err } @@ -441,7 +453,12 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa return nil } -func (b *Backuper) getPartsFromBackupDisk(backupPath string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { +func (b *Backuper) getPartsFromRemoteEmbeddedBackup(backupPath string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { + parts := map[string][]metadata.Part{} + return parts, nil +} + +func (b *Backuper) getPartsFromLocalEmbeddedBackupDisk(backupPath string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { parts := map[string][]metadata.Part{} dirList, err := os.ReadDir(path.Join(backupPath, "data", common.TablePathEncode(table.Database), common.TablePathEncode(table.Name))) if err != nil { diff --git a/pkg/backup/create_remote.go b/pkg/backup/create_remote.go index 0134143c..04da9379 100644 --- a/pkg/backup/create_remote.go +++ b/pkg/backup/create_remote.go @@ -15,7 +15,7 @@ func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePat if backupName == "" { backupName = NewBackupName() } - if err := b.CreateBackup(backupName, tablePattern, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { + if err := b.CreateBackup(backupName, tablePattern, diffFromRemote, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { return err } if err := b.Upload(backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index a8a08ff4..6c31d584 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -754,15 +754,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreDatabaseMapping(tablePattern) } - var err error - if b.cfg.General.RemoteStorage == "s3" { - b.cfg.S3.ObjectDiskPath, err = b.ch.ApplyMacros(ctx, b.cfg.S3.ObjectDiskPath) - } else if b.cfg.General.RemoteStorage == "gcs" { - b.cfg.GCS.ObjectDiskPath, err = b.ch.ApplyMacros(ctx, b.cfg.GCS.ObjectDiskPath) - } else if b.cfg.General.RemoteStorage == "azblob" { - b.cfg.AzureBlob.ObjectDiskPath, err = b.ch.ApplyMacros(ctx, b.cfg.AzureBlob.ObjectDiskPath) - } - if err != nil { + if err := b.applyMacrosToObjectDiskPath(ctx); err != nil { return err } @@ -1035,7 +1027,6 @@ func (b *Backuper) 
changeTablePatternFromRestoreDatabaseMapping(tablePattern str } func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, restoreOnlySchema bool, tablesForRestore ListOfTables, partitionsNameList map[metadata.TableTitle][]string) error { - restoreSQL := "Disk(?,?)" tablesSQL := "" l := len(tablesForRestore) for i, t := range tablesForRestore { @@ -1072,7 +1063,11 @@ func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, resto if restoreOnlySchema { settings = "SETTINGS structure_only=1" } - restoreSQL = fmt.Sprintf("RESTORE %s FROM %s %s", tablesSQL, restoreSQL, settings) + embeddedBackupDestination, err := b.getEmbeddedBackupLocation(ctx, backupName) + if err != nil { + return err + } + restoreSQL := fmt.Sprintf("RESTORE %s FROM %s %s", tablesSQL, embeddedBackupDestination, settings) restoreResults := make([]clickhouse.SystemBackups, 0) if err := b.ch.SelectContext(ctx, &restoreResults, restoreSQL, b.cfg.ClickHouse.EmbeddedBackupDisk, backupName); err != nil { return fmt.Errorf("restore error: %v", err) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 025adc2e..e0050bfa 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -279,10 +279,7 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro joinStoragePoliciesSQL := "" if len(diskFields) > 0 && diskFields[0].StoragePolicyPresent > 0 { storagePoliciesSQL = "groupUniqArray(s.policy_name)" - joinStoragePoliciesSQL = " INNER JOIN " - if ch.Config.UseEmbeddedBackupRestore { - joinStoragePoliciesSQL = " LEFT JOIN " - } + joinStoragePoliciesSQL = " LEFT JOIN " joinStoragePoliciesSQL += "(SELECT policy_name, arrayJoin(disks) AS disk FROM system.storage_policies) AS s ON s.disk = d.name" } var result []Disk diff --git a/pkg/config/config.go b/pkg/config/config.go index 08c597b3..9301afe9 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -69,6 +69,8 @@ type GCSConfig struct { CredentialsFile string `yaml:"credentials_file" envconfig:"GCS_CREDENTIALS_FILE"` CredentialsJSON string `yaml:"credentials_json" envconfig:"GCS_CREDENTIALS_JSON"` CredentialsJSONEncoded string `yaml:"credentials_json_encoded" envconfig:"GCS_CREDENTIALS_JSON_ENCODED"` + EmbeddedAccessKey string `yaml:"embedded_access_key" envconfig:"GCS_EMBEDDED_ACCESS_KEY"` + EmbeddedSecretKey string `yaml:"embedded_secret_key" envconfig:"GCS_EMBEDDED_SECRET_KEY"` Bucket string `yaml:"bucket" envconfig:"GCS_BUCKET"` Path string `yaml:"path" envconfig:"GCS_PATH"` ObjectDiskPath string `yaml:"object_disk_path" envconfig:"GCS_OBJECT_DISK_PATH"` diff --git a/pkg/server/server.go b/pkg/server/server.go index 8226af1b..f9ec0393 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -806,6 +806,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) return } tablePattern := "" + embeddedBaseBackup := "" partitionsToBackup := make([]string, 0) backupName := backup.NewBackupName() schemaOnly := false @@ -818,6 +819,9 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) tablePattern = tp[0] fullCommand = fmt.Sprintf("%s --tables=\"%s\"", fullCommand, tablePattern) } + if baseBackup, exists := query["embedded-base-backup"]; exists { + embeddedBaseBackup = baseBackup[0] + } if partitions, exist := query["partitions"]; exist { partitionsToBackup = strings.Split(partitions[0], ",") fullCommand = fmt.Sprintf("%s --partitions=\"%s\"", fullCommand, partitions) @@ -862,7 +866,7 @@ func (api *APIServer) 
httpCreateHandler(w http.ResponseWriter, r *http.Request) go func() { err, _ := api.metrics.ExecuteWithMetrics("create", 0, func() error { b := backup.NewBackuper(cfg) - return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) + return b.CreateBackup(backupName, tablePattern, embeddedBaseBackup, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { api.log.Errorf("API /backup/create error: %v", err) diff --git a/test/integration/config-azblob-embedded-url.yml b/test/integration/config-azblob-embedded-url.yml new file mode 100644 index 00000000..2a2cd4b8 --- /dev/null +++ b/test/integration/config-azblob-embedded-url.yml @@ -0,0 +1,37 @@ +general: + disable_progress_bar: true + remote_storage: azblob + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + - "_temporary_and_external_tables.*" + restore_schema_on_cluster: "{cluster}" +clickhouse: + host: clickhouse + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 4h + restart_command: bash -c 'echo "FAKE RESTART"' + use_embedded_backup_restore: true + embedded_backup_disk: "" +azblob: + account_name: devstoreaccount1 + account_key: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== + endpoint_suffix: azure:10000 + endpoint_schema: http + container: container1 + object_disk_path: object_disk/{cluster}/{shard} + path: backup + compression_format: none +api: + listen: :7171 + create_integration_tables: true + integration_tables_host: "clickhouse-backup" + allow_parallel: true diff --git a/test/integration/config-s3-embedded-url.yml b/test/integration/config-s3-embedded-url.yml new file mode 100644 index 00000000..3c7f7dba --- /dev/null +++ b/test/integration/config-s3-embedded-url.yml @@ -0,0 +1,39 @@ +general: + disable_progress_bar: true + remote_storage: s3 + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + - "_temporary_and_external_tables.*" + restore_schema_on_cluster: "{cluster}" +clickhouse: + host: clickhouse + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 4h + restart_command: bash -c 'echo "FAKE RESTART"' + use_embedded_backup_restore: true + embedded_backup_disk: "" +s3: + access_key: access-key + secret_key: it-is-my-super-secret-key + bucket: clickhouse + endpoint: http://minio:9000 + acl: private + force_path_style: true + path: backup/{cluster}/{shard} + object_disk_path: object_disk/{cluster}/{shard} + disable_ssl: true + compression_format: none +api: + listen: :7171 + create_integration_tables: true + integration_tables_host: "clickhouse-backup" + allow_parallel: true diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2f66b039..da6e3bbd 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1803,9 +1803,14 @@ func TestIntegrationEmbedded(t *testing.T) { //CUSTOM backup create folder in each disk r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) runMainIntegrationScenario(t, "EMBEDDED_S3", 
"config-s3-embedded.yml") - //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088 - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - //runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + + //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088, https://github.com/Azure/Azurite/issues/2053 + if version == "head" || compareVersion(version, "24.2") >= 0 { + r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) + runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + } //@TODO think about how to implements embedded backup for s3_plain disks //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") @@ -1992,13 +1997,12 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, testBackupName) checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) - //diffFrom := []string{"--diff-from", "--diff-from-remote"}[rand.Intn(2)] diffFrom := "--diff-from-remote" uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s %s %s --resume", backupConfig, incrementBackupName, diffFrom, testBackupName) checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) backupDir := "/var/lib/clickhouse/backup" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasPrefix(remoteStorageType, "_URL") { backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) } out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) From 8e8c319525ec89abd196f8abac86f9be8f810e72 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 24 Feb 2024 00:27:29 +0500 Subject: [PATCH 03/80] WIP, refactoring create, upload, download, restore commands which allow RBAC, Configs for Embedded Backup --- pkg/backup/backuper.go | 16 +- pkg/backup/create.go | 372 +++++++++++-------- pkg/backup/delete.go | 12 +- pkg/backup/restore.go | 17 +- pkg/backup/upload.go | 38 +- pkg/clickhouse/structs.go | 5 + test/integration/docker-compose.yml | 2 + test/integration/docker-compose_advanced.yml | 2 + test/integration/integration_test.go | 273 +++++++------- 9 files changed, 433 insertions(+), 304 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index d30e7cdd..f0df9b1c 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -92,6 +92,9 @@ func (b *Backuper) init(ctx context.Context, disks []clickhouse.Disk, backupName b.EmbeddedBackupDataPath = disk.Path } } + if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.EmbeddedBackupDataPath == "" { + b.EmbeddedBackupDataPath = b.DefaultDataPath + } b.DiskToPathMap = diskMap if b.cfg.General.RemoteStorage != "none" && b.cfg.General.RemoteStorage != "custom" { b.dst, err = 
storage.NewBackupDestination(ctx, b.cfg, b.ch, true, backupName) @@ -240,15 +243,22 @@ func (b *Backuper) applyMacrosToObjectDiskPath(ctx context.Context) error { func (b *Backuper) buildEmbeddedLocationS3() string { url := url.URL{} url.Scheme = "https" + if strings.HasPrefix(b.cfg.S3.Endpoint, "http") { + newUrl, _ := url.Parse(b.cfg.S3.Endpoint) + url = *newUrl + url.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + } else { + url.Host = b.cfg.S3.Endpoint + url.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + } if b.cfg.S3.DisableSSL { url.Scheme = "http" } - url.Host = b.cfg.S3.Endpoint - if url.Host == "" && b.cfg.S3.Region != "" && !b.cfg.S3.ForcePathStyle { + if url.Host == "" && b.cfg.S3.Region != "" && b.cfg.S3.ForcePathStyle { url.Host = "s3." + b.cfg.S3.Region + ".amazonaws.com" url.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) } - if url.Host == "" && b.cfg.S3.Bucket != "" && b.cfg.S3.ForcePathStyle { + if url.Host == "" && b.cfg.S3.Bucket != "" && !b.cfg.S3.ForcePathStyle { url.Host = b.cfg.S3.Bucket + "." + "s3." + b.cfg.S3.Region + ".amazonaws.com" url.Path = b.cfg.S3.ObjectDiskPath } diff --git a/pkg/backup/create.go b/pkg/backup/create.go index d1875845..dc4a8a86 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -65,7 +65,6 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup str defer cancel() startBackup := time.Now() - doBackupData := !schemaOnly && !rbacOnly && !configsOnly if backupName == "" { backupName = NewBackupName() } @@ -111,6 +110,10 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup str if err != nil { return err } + b.DefaultDataPath, err = b.ch.GetDefaultPath(disks) + if err != nil { + return err + } diskMap := make(map[string]string, len(disks)) diskTypes := make(map[string]string, len(disks)) @@ -119,11 +122,13 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup str diskTypes[disk.Name] = disk.Type } partitionsIdMap, partitionsNameList := partition.ConvertPartitionsToIdsMapAndNamesList(ctx, b.ch, tables, nil, partitions) - // create + doBackupData := !schemaOnly && !rbacOnly && !configsOnly + backupRBACSize, backupConfigSize := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) + if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, embeddedBaseBackup, partitionsNameList, partitionsIdMap, schemaOnly, createRBAC, createConfigs, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, log, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, doBackupData, schemaOnly, version, tablePattern, embeddedBaseBackup, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) } else { - err = b.createBackupLocal(ctx, backupName, partitionsIdMap, tables, doBackupData, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, version, disks, diskMap, diskTypes, allDatabases, allFunctions, log, startBackup) + err = b.createBackupLocal(ctx, backupName, doBackupData, schemaOnly, rbacOnly, configsOnly, version, partitionsIdMap, tables, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, log, startBackup) } if err != nil { // delete local backup if can't create @@ -146,18 +151,40 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup str 
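// Illustration, using the test/integration/config-s3-embedded-url.yml settings introduced in the
// previous patch (endpoint http://minio:9000, bucket "clickhouse", object_disk_path
// "object_disk/{cluster}/{shard}", disable_ssl enabled, empty embedded_backup_disk): after the
// {cluster}/{shard} macros are expanded, buildEmbeddedLocationS3() should return roughly
//   http://minio:9000/clickhouse/object_disk/<cluster>/<shard>
// and getEmbeddedBackupLocation() should wrap it into a destination clause such as
//   S3('http://minio:9000/clickhouse/object_disk/<cluster>/<shard>/<backup_name>','access-key','it-is-my-super-secret-key')
// while a non-empty embedded_backup_disk keeps the Disk('<disk>','<backup_name>') form.
// The placeholder values above come from the test config, not from output captured in a real run.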
return nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, createRBAC, rbacOnly bool, createConfigs, configsOnly bool, version string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry, startBackup time.Time) error { +func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupName string, createRBAC bool, rbacOnly bool, createConfigs bool, configsOnly bool, disks []clickhouse.Disk, diskMap map[string]string, log *apexLog.Entry) (uint64, uint64) { + backupRBACSize, backupConfigSize := uint64(0), uint64(0) + backupPath := path.Join(b.DefaultDataPath, "backup") + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + backupPath = diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk] + } + backupPath = path.Join(backupPath, backupName) + if createRBAC || rbacOnly { + var createRBACErr error + if backupRBACSize, createRBACErr = b.createBackupRBAC(ctx, backupPath, disks); createRBACErr != nil { + log.Fatalf("error during do RBAC backup: %v", createRBACErr) + } else { + log.WithField("size", utils.FormatBytes(backupRBACSize)).Info("done createBackupRBAC") + } + } + if createConfigs || configsOnly { + var createConfigsErr error + if backupConfigSize, createConfigsErr = b.createBackupConfigs(ctx, backupPath); createConfigsErr != nil { + log.Fatalf("error during do CONFIG backup: %v", createConfigsErr) + } else { + log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs") + } + } + return backupRBACSize, backupConfigSize +} + +func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { return err } } - defaultPath, err := b.ch.GetDefaultPath(disks) - if err != nil { - return err - } - backupPath := path.Join(defaultPath, "backup", backupName) + backupPath := path.Join(b.DefaultDataPath, "backup", backupName) if _, err := os.Stat(path.Join(backupPath, "metadata.json")); err == nil || !os.IsNotExist(err) { return fmt.Errorf("'%s' medatata.json already exists", backupName) } @@ -182,6 +209,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } } if isObjectDiskContainsTables { + var err error if err = config.ValidateObjectDiskConfig(b.cfg); err != nil { return err } @@ -270,43 +298,30 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par if wgWaitErr := createBackupWorkingGroup.Wait(); wgWaitErr != nil { return fmt.Errorf("one of createBackupLocal go-routine return error: %v", wgWaitErr) } - backupRBACSize, backupConfigSize := uint64(0), uint64(0) - - if createRBAC || rbacOnly { - var createRBACErr error - if backupRBACSize, createRBACErr = b.createBackupRBAC(ctx, backupPath, disks); createRBACErr != nil { - log.Fatalf("error during do RBAC backup: %v", err) - } else { - log.WithField("size", 
utils.FormatBytes(backupRBACSize)).Info("done createBackupRBAC") - } - } - if createConfigs || configsOnly { - var createConfigsErr error - if backupConfigSize, createConfigsErr = b.createBackupConfigs(ctx, backupPath); createConfigsErr != nil { - log.Fatalf("error during do CONFIG backup: %v", createConfigsErr) - } else { - log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs") - } - } - backupMetaFile := path.Join(defaultPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, diskTypes, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { + backupMetaFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json") + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { return fmt.Errorf("createBackupMetadata return error: %v", err) } log.WithField("duration", utils.HumanizeDuration(time.Since(startBackup))).Info("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern, baseBackup string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, schemaOnly, createRBAC, createConfigs bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName string, doBackupData, schemaOnly bool, backupVersion, tablePattern, baseBackup string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { // TODO: Implement sharded backup operations for embedded backups if doesShard(b.cfg.General.ShardedOperationMode) { return fmt.Errorf("cannot perform embedded backup: %w", errShardOperationUnsupported) } - if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) + backupPath := path.Join(b.DefaultDataPath, "backup") + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + backupPath = diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk] } - if createRBAC || createConfigs { - return fmt.Errorf("`use_embedded_backup_restore: true` doesn't support --rbac, --configs parameters") + backupPath = path.Join(backupPath, backupName) + + backupMetadataSize := uint64(0) + backupDataSize := make([]clickhouse.BackupDataSize, 0) + if !schemaOnly && !doBackupData { + backupDataSize = append(backupDataSize, clickhouse.BackupDataSize{Size: 0}) } l := 0 for _, table := range tables { @@ -314,19 +329,129 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa l += 1 } } - if l == 0 { - return fmt.Errorf("`use_embedded_backup_restore: true` doesn't allow empty backups, check 
your parameter --tables=%v", tablePattern) + if l == 0 && (schemaOnly || doBackupData) { + return fmt.Errorf("`use_embedded_backup_restore: true` not found tables for backup, check your parameter --tables=%v", tablePattern) + } + tablesTitle := make([]metadata.TableTitle, l) + + if schemaOnly || doBackupData { + if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) + } + if b.cfg.ClickHouse.EmbeddedBackupDisk == "" { + if err := config.ValidateObjectDiskConfig(b.cfg); err != nil { + return err + } + } + + tableSizeSQL, backupSQL, err := b.generateEmbeddedBackupSQL(ctx, backupName, schemaOnly, tables, tablesTitle, partitionsNameList, l, baseBackup) + if err != nil { + return err + } + backupResult := make([]clickhouse.SystemBackups, 0) + if err := b.ch.SelectContext(ctx, &backupResult, backupSQL); err != nil { + return fmt.Errorf("backup error: %v", err) + } + if len(backupResult) != 1 || (backupResult[0].Status != "BACKUP_COMPLETE" && backupResult[0].Status != "BACKUP_CREATED") { + return fmt.Errorf("backup return wrong results: %+v", backupResult) + } + + if schemaOnly { + backupDataSize = append(backupDataSize, clickhouse.BackupDataSize{Size: 0}) + } else { + if backupResult[0].CompressedSize == 0 { + chVersion, err := b.ch.GetVersion(ctx) + if err != nil { + return err + } + backupSizeSQL := fmt.Sprintf("SELECT sum(bytes_on_disk) AS backup_data_size FROM system.parts WHERE active AND concat(database,'.',table) IN (%s)", tableSizeSQL) + if chVersion >= 20005000 { + backupSizeSQL = fmt.Sprintf("SELECT sum(total_bytes) AS backup_data_size FROM system.tables WHERE concat(database,'.',name) IN (%s)", tableSizeSQL) + } + if err := b.ch.SelectContext(ctx, &backupDataSize, backupSizeSQL); err != nil { + return err + } + } else { + backupDataSize = append(backupDataSize, clickhouse.BackupDataSize{Size: backupResult[0].CompressedSize}) + } + } + + if doBackupData && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { + var err error + if b.dst, err = storage.NewBackupDestination(ctx, b.cfg, b.ch, false, backupName); err != nil { + return err + } + if err = b.dst.Connect(ctx); err != nil { + return fmt.Errorf("createBackupEmbedded: can't connect to %s: %v", b.dst.Kind(), err) + } + defer func() { + if closeErr := b.dst.Close(ctx); closeErr != nil { + log.Warnf("createBackupEmbedded: can't close connection to %s: %v", b.dst.Kind(), closeErr) + } + }() + } + + for _, table := range tables { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if table.Skip { + continue + } + var disksToPartsMap map[string][]metadata.Part + if doBackupData { + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + log.Debugf("calculate parts list `%s`.`%s` from embedded backup disk `%s`") + disksToPartsMap, err = b.getPartsFromLocalEmbeddedBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) + } else { + log.Debugf("calculate parts list `%s`.`%s` from embedded backup remote destination") + disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(ctx, backupName, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], log) + } + } + if err != nil { + return err + } + if schemaOnly || doBackupData { + metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ + Table: table.Name, + Database: 
table.Database, + Query: table.CreateTableQuery, + TotalBytes: table.TotalBytes, + Size: map[string]int64{b.cfg.ClickHouse.EmbeddedBackupDisk: 0}, + Parts: disksToPartsMap, + MetadataOnly: schemaOnly, + }, disks) + if err != nil { + return err + } + backupMetadataSize += metadataSize + } + } + } + } + backupMetaFile := path.Join(backupPath, "metadata.json") + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, backupMetadataSize, backupRBACSize, backupConfigSize, tablesTitle, allDatabases, allFunctions, log); err != nil { + return err } - tableMetas := make([]metadata.TableTitle, l) + + log.WithFields(apexLog.Fields{ + "operation": "create_embedded", + "duration": utils.HumanizeDuration(time.Since(startBackup)), + }).Info("done") + + return nil +} + +func (b *Backuper) generateEmbeddedBackupSQL(ctx context.Context, backupName string, schemaOnly bool, tables []clickhouse.Table, tablesTitle []metadata.TableTitle, partitionsNameList map[metadata.TableTitle][]string, tablesListLen int, baseBackup string) (string, string, error) { tablesSQL := "" tableSizeSQL := "" i := 0 - backupMetadataSize := uint64(0) for _, table := range tables { if table.Skip { continue } - tableMetas[i] = metadata.TableTitle{ + tablesTitle[i] = metadata.TableTitle{ Database: table.Database, Table: table.Name, } @@ -345,17 +470,17 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } tablesSQL += fmt.Sprintf(" PARTITIONS %s", partitionsSQL[:len(partitionsSQL)-1]) } - if i < l { + if i < tablesListLen { tablesSQL += ", " tableSizeSQL += ", " } } - embeddedBackupDestination, err := b.getEmbeddedBackupLocation(ctx, backupName) + embeddedBackupLocation, err := b.getEmbeddedBackupLocation(ctx, backupName) if err != nil { - return err + return "", "", err } - backupSQL := fmt.Sprintf("BACKUP %s TO %s", tablesSQL, embeddedBackupDestination) - backupSettings := []string{"show_table_uuid_in_table_create_query_if_not_nil=1"} + backupSQL := fmt.Sprintf("BACKUP %s TO %s", tablesSQL, embeddedBackupLocation) + var backupSettings []string if schemaOnly { backupSettings = append(backupSettings, "structure_only=1") } @@ -368,128 +493,73 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa if len(backupSettings) > 0 { backupSQL += " SETTINGS " + strings.Join(backupSettings, ", ") } - backupResult := make([]clickhouse.SystemBackups, 0) - if err := b.ch.SelectContext(ctx, &backupResult, backupSQL); err != nil { - return fmt.Errorf("backup error: %v", err) - } - if len(backupResult) != 1 || (backupResult[0].Status != "BACKUP_COMPLETE" && backupResult[0].Status != "BACKUP_CREATED") { - return fmt.Errorf("backup return wrong results: %+v", backupResult) - } - backupDataSize := make([]struct { - Size uint64 `ch:"backup_data_size"` - }, 0) - if !schemaOnly { - if backupResult[0].CompressedSize == 0 { - chVersion, err := b.ch.GetVersion(ctx) - if err != nil { - return err - } - backupSizeSQL := fmt.Sprintf("SELECT sum(bytes_on_disk) AS backup_data_size FROM system.parts WHERE active AND concat(database,'.',table) IN (%s)", tableSizeSQL) - if chVersion >= 20005000 { - backupSizeSQL = fmt.Sprintf("SELECT sum(total_bytes) AS backup_data_size FROM system.tables WHERE concat(database,'.',name) IN (%s)", tableSizeSQL) - } - if err := b.ch.SelectContext(ctx, &backupDataSize, backupSizeSQL); err != nil { - return err - } - } else { - backupDataSize = append(backupDataSize, struct { - Size uint64 
`ch:"backup_data_size"` - }{Size: backupResult[0].CompressedSize}) - } - } else { - backupDataSize = append(backupDataSize, struct { - Size uint64 `ch:"backup_data_size"` - }{Size: 0}) - } + return tableSizeSQL, backupSQL, nil +} - log.Debug("calculate parts list from embedded backup disk") - backupPath := b.DefaultDataPath - if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - backupPath = diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk] - } - backupPath = path.Join(backupPath, backupName) - for _, table := range tables { - select { - case <-ctx.Done(): - return ctx.Err() - default: - if table.Skip { - continue - } - var disksToPartsMap map[string][]metadata.Part - if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - disksToPartsMap, err = b.getPartsFromLocalEmbeddedBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) - } else { - disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) - } - if err != nil { - return err - } - metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ - Table: table.Name, - Database: table.Database, - Query: table.CreateTableQuery, - TotalBytes: table.TotalBytes, - Size: map[string]int64{b.cfg.ClickHouse.EmbeddedBackupDisk: 0}, - Parts: disksToPartsMap, - MetadataOnly: schemaOnly, - }, disks) - if err != nil { - return err - } - backupMetadataSize += metadataSize - } +func (b *Backuper) getPartsFromRemoteEmbeddedBackup(ctx context.Context, backupName string, table clickhouse.Table, partitionsIdsMap common.EmptyMap, log *apexLog.Entry) (map[string][]metadata.Part, error) { + dirListStr := make([]string, 0) + remoteEmbeddedBackupPath := "" + if b.cfg.General.RemoteStorage == "s3" { + remoteEmbeddedBackupPath = b.cfg.S3.ObjectDiskPath + } else if b.cfg.General.RemoteStorage == "gcs" { + remoteEmbeddedBackupPath = b.cfg.GCS.ObjectDiskPath + } else if b.cfg.General.RemoteStorage == "azblob" { + remoteEmbeddedBackupPath = b.cfg.AzureBlob.ObjectDiskPath + } else { + return nil, fmt.Errorf("getPartsFromRemoteEmbeddedBackup: unsupported remote_storage: %s", b.cfg.General.RemoteStorage) } - backupMetaFile := path.Join(backupPath, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, log); err != nil { - return err + remoteEmbeddedBackupPath = path.Join(remoteEmbeddedBackupPath, backupName, "data", common.TablePathEncode(table.Database), common.TablePathEncode(table.Name)) + if walkErr := b.dst.WalkAbsolute(ctx, remoteEmbeddedBackupPath, false, func(ctx context.Context, fInfo storage.RemoteFile) error { + dirListStr = append(dirListStr, fInfo.Name()) + return nil + }); walkErr != nil { + return nil, walkErr } - - log.WithFields(apexLog.Fields{ - "operation": "create_embedded", - "duration": utils.HumanizeDuration(time.Since(startBackup)), - }).Info("done") - - return nil -} - -func (b *Backuper) getPartsFromRemoteEmbeddedBackup(backupPath string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { - parts := map[string][]metadata.Part{} - return parts, nil + log.Debugf("getPartsFromRemoteEmbeddedBackup from %s found %d parts", remoteEmbeddedBackupPath, len(dirListStr)) + return b.fillEmbeddedPartsFromDirList(partitionsIdsMap, dirListStr, "default") } func (b 
*Backuper) getPartsFromLocalEmbeddedBackupDisk(backupPath string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { - parts := map[string][]metadata.Part{} dirList, err := os.ReadDir(path.Join(backupPath, "data", common.TablePathEncode(table.Database), common.TablePathEncode(table.Name))) if err != nil { if os.IsNotExist(err) { - return parts, nil + return map[string][]metadata.Part{}, nil } return nil, err } + dirListStr := make([]string, len(dirList)) + for i, d := range dirList { + dirListStr[i] = d.Name() + } + return b.fillEmbeddedPartsFromDirList(partitionsIdsMap, dirListStr, b.cfg.ClickHouse.EmbeddedBackupDisk) +} + +func (b *Backuper) fillEmbeddedPartsFromDirList(partitionsIdsMap common.EmptyMap, dirList []string, diskName string) (map[string][]metadata.Part, error) { + parts := map[string][]metadata.Part{} if len(partitionsIdsMap) == 0 { - parts[b.cfg.ClickHouse.EmbeddedBackupDisk] = make([]metadata.Part, len(dirList)) - for i, d := range dirList { - parts[b.cfg.ClickHouse.EmbeddedBackupDisk][i] = metadata.Part{ - Name: d.Name(), + parts[diskName] = make([]metadata.Part, len(dirList)) + for i, dirName := range dirList { + parts[diskName][i] = metadata.Part{ + Name: dirName, } } - } else { - parts[b.cfg.ClickHouse.EmbeddedBackupDisk] = make([]metadata.Part, 0) - for _, d := range dirList { - found := false - for prefix := range partitionsIdsMap { - if strings.HasPrefix(d.Name(), prefix+"_") { - found = true - break - } - } - if found { - parts[b.cfg.ClickHouse.EmbeddedBackupDisk] = append(parts[b.cfg.ClickHouse.EmbeddedBackupDisk], metadata.Part{ - Name: d.Name(), - }) + return parts, nil + } + + parts[diskName] = make([]metadata.Part, 0) + for _, dirName := range dirList { + found := false + for prefix := range partitionsIdsMap { + if strings.HasPrefix(dirName, prefix+"_") { + found = true + break } } + if found { + parts[diskName] = append(parts[b.cfg.ClickHouse.EmbeddedBackupDisk], metadata.Part{ + Name: dirName, + }) + } } return parts, nil } @@ -775,6 +845,7 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac if err := filesystemhelper.Chown(backupMetaFile, b.ch, disks, false); err != nil { log.Warnf("can't chown %s: %v", backupMetaFile, err) } + b.log.Debugf("%s created", backupMetaFile) return nil } } @@ -798,5 +869,6 @@ func (b *Backuper) createTableMetadata(metadataPath string, table metadata.Table if err := filesystemhelper.Chown(metadataFile, b.ch, disks, false); err != nil { return 0, err } + b.log.Debugf("%s created", metadataFile) return uint64(len(metadataBody)), nil } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index be224cc6..c907906d 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -177,17 +177,17 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx con if err != nil { return err } - if !skip && strings.Contains(backup.Tags, "embedded") { - if err = b.cleanLocalEmbedded(ctx, backup, disks); err != nil { - log.Warnf("b.cleanLocalEmbedded return error: %v", err) + if !skip && (hasObjectDisks || (strings.Contains(backup.Tags, "embedded") && b.cfg.ClickHouse.EmbeddedBackupDisk == "")) { + if err = b.cleanBackupObjectDisks(ctx, backupName); err != nil { + log.Warnf("b.cleanBackupObjectDisks return error: %v", err) return err } } - if !skip && hasObjectDisks { - if err = b.cleanBackupObjectDisks(ctx, backupName); err != nil { + if !skip && strings.Contains(backup.Tags, "embedded") { + if err = b.cleanLocalEmbedded(ctx, backup, disks); err 
!= nil { + log.Warnf("b.cleanLocalEmbedded return error: %v", err) return err } - log.Debugf("b.cleanBackupObjectDisks return skip=%v", err) } return nil } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 6c31d584..b42f4ce8 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -1059,17 +1059,24 @@ func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, resto } } } - settings := "" + var settings []string if restoreOnlySchema { - settings = "SETTINGS structure_only=1" + settings = append(settings, "structure_only=1") } - embeddedBackupDestination, err := b.getEmbeddedBackupLocation(ctx, backupName) + if b.cfg.ClickHouse.EmbeddedRestoreThreads > 0 { + settings = append(settings, fmt.Sprintf("restore_threads=%d", b.cfg.ClickHouse.EmbeddedRestoreThreads)) + } + embeddedBackupLocation, err := b.getEmbeddedBackupLocation(ctx, backupName) if err != nil { return err } - restoreSQL := fmt.Sprintf("RESTORE %s FROM %s %s", tablesSQL, embeddedBackupDestination, settings) + settingsStr := "" + if len(settings) > 0 { + settingsStr = "SETTINGS " + strings.Join(settings, ", ") + } + restoreSQL := fmt.Sprintf("RESTORE %s FROM %s %s", tablesSQL, embeddedBackupLocation, settingsStr) restoreResults := make([]clickhouse.SystemBackups, 0) - if err := b.ch.SelectContext(ctx, &restoreResults, restoreSQL, b.cfg.ClickHouse.EmbeddedBackupDisk, backupName); err != nil { + if err := b.ch.SelectContext(ctx, &restoreResults, restoreSQL); err != nil { return fmt.Errorf("restore error: %v", err) } if len(restoreResults) == 0 || restoreResults[0].Status != "RESTORED" { diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index bf787c57..414d70a5 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -147,7 +147,8 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str idx := i uploadGroup.Go(func() error { var uploadedBytes int64 - if !schemaOnly { + //skip upload data for embedded backup with empty embedded_backup_disk + if !schemaOnly && (!b.isEmbedded || b.cfg.ClickHouse.EmbeddedBackupDisk != "") { var files map[string][]string var err error files, uploadedBytes, err = b.uploadTableData(uploadCtx, backupName, tablesForUpload[idx]) @@ -174,16 +175,14 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return fmt.Errorf("one of upload table go-routine return error: %v", err) } - if !b.isEmbedded { - // upload rbac for backup - if backupMetadata.RBACSize, err = b.uploadRBACData(ctx, backupName); err != nil { - return fmt.Errorf("b.uploadRBACData return error: %v", err) - } + // upload rbac for backup + if backupMetadata.RBACSize, err = b.uploadRBACData(ctx, backupName); err != nil { + return fmt.Errorf("b.uploadRBACData return error: %v", err) + } - // upload configs for backup - if backupMetadata.ConfigSize, err = b.uploadConfigData(ctx, backupName); err != nil { - return fmt.Errorf("b.uploadConfigData return error: %v", err) - } + // upload configs for backup + if backupMetadata.ConfigSize, err = b.uploadConfigData(ctx, backupName); err != nil { + return fmt.Errorf("b.uploadConfigData return error: %v", err) } // upload metadata for backup @@ -312,7 +311,7 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote func (b *Backuper) prepareTableListToUpload(ctx context.Context, backupName string, tablePattern string, partitions []string) (tablesForUpload ListOfTables, err error) { metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") - if b.isEmbedded { 
+ if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") } tablesForUpload, _, err = b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, false, partitions) @@ -454,7 +453,12 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, } func (b *Backuper) uploadConfigData(ctx context.Context, backupName string) (uint64, error) { - configBackupPath := path.Join(b.DefaultDataPath, "backup", backupName, "configs") + backupPath := b.DefaultDataPath + configBackupPath := path.Join(backupPath, "backup", backupName, "configs") + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + backupPath = b.EmbeddedBackupDataPath + configBackupPath = path.Join(backupPath, backupName, "configs") + } configFilesGlobPattern := path.Join(configBackupPath, "**/*.*") if b.cfg.GetCompressionFormat() == "none" { remoteConfigsDir := path.Join(backupName, "configs") @@ -465,7 +469,12 @@ func (b *Backuper) uploadConfigData(ctx context.Context, backupName string) (uin } func (b *Backuper) uploadRBACData(ctx context.Context, backupName string) (uint64, error) { - rbacBackupPath := path.Join(b.DefaultDataPath, "backup", backupName, "access") + backupPath := b.DefaultDataPath + rbacBackupPath := path.Join(backupPath, "backup", backupName, "access") + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + backupPath = b.EmbeddedBackupDataPath + rbacBackupPath = path.Join(backupPath, backupName, "access") + } accessFilesGlobPattern := path.Join(rbacBackupPath, "*.*") if b.cfg.GetCompressionFormat() == "none" { remoteRBACDir := path.Join(backupName, "access") @@ -678,6 +687,9 @@ func (b *Backuper) uploadTableMetadataRegular(ctx context.Context, backupName st } func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName string, tableMetadata metadata.TableMetadata) (int64, error) { + if b.cfg.ClickHouse.EmbeddedBackupDisk == "" { + return 0, nil + } remoteTableMetaFile := path.Join(backupName, "metadata", common.TablePathEncode(tableMetadata.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableMetadata.Table))) if b.resume { if isProcessed, processedSize := b.resumableState.IsAlreadyProcessed(remoteTableMetaFile); isProcessed { diff --git a/pkg/clickhouse/structs.go b/pkg/clickhouse/structs.go index 7dd7b679..49d306eb 100644 --- a/pkg/clickhouse/structs.go +++ b/pkg/clickhouse/structs.go @@ -87,3 +87,8 @@ type ColumnDataTypes struct { Column string `ch:"column"` Types []string `ch:"uniq_types"` } + +// BackupDataSize - info from system.parts or system.tables when embedded BACKUP statement return zero size +type BackupDataSize struct { + Size uint64 `ch:"backup_data_size"` +} diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index ee0cd73b..95470526 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -185,6 +185,7 @@ services: - ./credentials.json:/etc/clickhouse-backup/credentials.json - ./config-azblob.yml:/etc/clickhouse-backup/config-azblob.yml - ./config-azblob-embedded.yml:/etc/clickhouse-backup/config-azblob-embedded.yml + - ./config-azblob-embedded-url.yml:/etc/clickhouse-backup/config-azblob-embedded-url.yml - ./config-custom-kopia.yml:/etc/clickhouse-backup/config-custom-kopia.yml - ./config-custom-restic.yml:/etc/clickhouse-backup/config-custom-restic.yml - ./config-custom-rsync.yml:/etc/clickhouse-backup/config-custom-rsync.yml @@ -195,6 +196,7 @@ services: - 
./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml + - ./config-s3-embedded-url.yml:/etc/clickhouse-backup/config-s3-embedded-url.yml - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 53671bcf..94b18fbe 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -243,6 +243,7 @@ services: - ./credentials.json:/etc/clickhouse-backup/credentials.json - ./config-azblob.yml:/etc/clickhouse-backup/config-azblob.yml - ./config-azblob-embedded.yml:/etc/clickhouse-backup/config-azblob-embedded.yml + - ./config-azblob-embedded-url.yml:/etc/clickhouse-backup/config-azblob-embedded-url.yml - ./config-custom-kopia.yml:/etc/clickhouse-backup/config-custom-kopia.yml - ./config-custom-restic.yml:/etc/clickhouse-backup/config-custom-restic.yml - ./config-custom-rsync.yml:/etc/clickhouse-backup/config-custom-rsync.yml @@ -253,6 +254,7 @@ services: - ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml + - ./config-s3-embedded-url.yml:/etc/clickhouse-backup/config-s3-embedded-url.yml - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index da6e3bbd..9bde954f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -447,160 +447,179 @@ func TestS3NoDeletePermission(t *testing.T) { checkObjectStorageIsEmpty(t, r, "S3") } -// TestDoRestoreRBAC need clickhouse-server restart, no parallel -func TestDoRestoreRBAC(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.4") == -1 { +// TestRBAC need clickhouse-server restart, no parallel +func TestRBAC(t *testing.T) { + chVersion := os.Getenv("CLICKHOUSE_VERSION") + if compareVersion(chVersion, "20.4") < 0 { t.Skipf("Test skipped, RBAC not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } ch := &TestClickHouse{} r := require.New(t) + testRBACScenario := func(config string) { + ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") + + ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP QUOTA IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") + + log.Info("create RBAC related objects") + ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") + ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") + ch.queryWithNoError(r, 
"CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") + ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") + ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + + log.Info("drop all RBAC related objects after backup") + ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") + ch.queryWithNoError(r, "DROP QUOTA test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE test_rbac") + ch.queryWithNoError(r, "DROP USER test_rbac") + + log.Info("download+restore RBAC") + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup")) + + out, err := dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") + r.Contains(out, "RBAC successfully restored") + r.NoError(err) - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) - - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - - ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP QUOTA IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") - - log.Info("create RBAC related objects") - ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") - ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") - ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") - ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") - ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--rbac", "--rbac-only", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup upload test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - - log.Info("drop all RBAC related objects after backup") - ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") - ch.queryWithNoError(r, "DROP QUOTA test_rbac") - 
ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE test_rbac") - ch.queryWithNoError(r, "DROP USER test_rbac") - - log.Info("download+restore RBAC") - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup download test_rbac_backup")) - - out, err := dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --rm --rbac test_rbac_backup") - r.Contains(out, "RBAC successfully restored") - r.NoError(err) - - out, err = dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --rm --rbac-only test_rbac_backup") - r.Contains(out, "RBAC successfully restored") - r.NoError(err) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + out, err = dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac-only test_rbac_backup") + r.Contains(out, "RBAC successfully restored") + r.NoError(err) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - ch.chbackend.Close() - // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) - ch.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) + ch.chbackend.Close() + // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) + ch.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - rbacTypes := map[string]string{ - "PROFILES": "test_rbac", - "QUOTAS": "test_rbac", - "POLICIES": "test_rbac ON default.test_rbac", - "ROLES": "test_rbac", - "USERS": "test_rbac", - } - for rbacType, expectedValue := range rbacTypes { - var rbacRows []struct { - Name string `ch:"name"` + rbacTypes := map[string]string{ + "PROFILES": "test_rbac", + "QUOTAS": "test_rbac", + "POLICIES": "test_rbac ON default.test_rbac", + "ROLES": "test_rbac", + "USERS": "test_rbac", } - err := ch.chbackend.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) - r.NoError(err) - found := false - for _, row := range rbacRows { - if expectedValue == row.Name { - found = true - break + for rbacType, expectedValue := range rbacTypes { + var rbacRows []struct { + Name string `ch:"name"` + } + err := ch.chbackend.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) + r.NoError(err) + found := false + for _, row := range rbacRows { + if expectedValue == row.Name { + found = true + break + } + } + if !found { + //r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) + r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) } } - if !found { - //r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) - r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) - } + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) + 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "remote", "test_rbac_backup")) + + ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") + ch.queryWithNoError(r, "DROP QUOTA test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE test_rbac") + ch.queryWithNoError(r, "DROP USER test_rbac") + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + ch.chbackend.Close() + } + testRBACScenario("/etc/clickhouse-backup/config-s3.yml") + if chVersion == "head" || compareVersion(chVersion, "24.1") >= 0 { + testRBACScenario("/etc/clickhouse-backup/config-s3-embedded.yml") + testRBACScenario("/etc/clickhouse-backup/config-s3-embedded-url.yml") + testRBACScenario("/etc/clickhouse-backup/config-azblob-embedded.yml") + } + if chVersion == "head" || compareVersion(chVersion, "24.2") >= 0 { + testRBACScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_rbac_backup")) - - ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") - ch.queryWithNoError(r, "DROP QUOTA test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE test_rbac") - ch.queryWithNoError(r, "DROP USER test_rbac") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.chbackend.Close() - } -// TestDoRestoreConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container +// TestConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container // need clickhouse-server restart, no parallel -func TestDoRestoreConfigs(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54391") < 0 { - t.Skipf("Test skipped, users.d is not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) - } +func TestConfigs(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) - ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") + testConfigsScenario := func(config string) { + ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + ch.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--configs", "--configs-only", "test_configs_backup")) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup")) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) - ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) - selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" - var settings string - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) - if settings != "1" { - r.NoError(dockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) - } - r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") + ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" + var settings string + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) + if settings != "1" { + r.NoError(dockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) + } + r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) - r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) - settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) - r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") + settings = "" + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" 
CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) - settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) - r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") + settings = "" + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - isTestConfigsTablePresent := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) - r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") + isTestConfigsTablePresent := 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) + r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "remote", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - ch.chbackend.Close() + ch.chbackend.Close() + } + testConfigsScenario("/etc/clickhouse-backup/config-s3.yml") + chVersion := os.Getenv("CLICKHOUSE_VERSION") + if chVersion == "head" || compareVersion(chVersion, "24.1") >= 0 { + testConfigsScenario("/etc/clickhouse-backup/config-s3-embedded.yml") + testConfigsScenario("/etc/clickhouse-backup/config-s3-embedded-url.yml") + testConfigsScenario("/etc/clickhouse-backup/config-azblob-embedded.yml") + } + if chVersion == "head" || compareVersion(chVersion, "24.2") >= 0 { + testConfigsScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") + } } // TestLongListRemote - no parallel, cause need to restart minito @@ -1802,14 +1821,14 @@ func TestIntegrationEmbedded(t *testing.T) { r := require.New(t) //CUSTOM backup create folder in each disk r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088, https://github.com/Azure/Azurite/issues/2053 if version == "head" || compareVersion(version, "24.2") >= 0 { 
r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") } //@TODO think about how to implements embedded backup for s3_plain disks //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) From b7c84da780475f4af116c6c2a28fa5afc6e73d4c Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 27 Feb 2024 00:25:59 +0500 Subject: [PATCH 04/80] BACKUP/RESTORE FROM/TO S3() pass test, but BACKUP/RESTORE FROM/TO AzureBlobStorage(), still not work, look https://github.com/Azure/Azurite/issues/2053, https://github.com/ClickHouse/ClickHouse/issues/52088 --- pkg/backup/backuper.go | 14 +- pkg/backup/create.go | 4 +- pkg/backup/delete.go | 65 +++---- pkg/backup/download.go | 24 ++- pkg/backup/list.go | 6 + pkg/backup/restore.go | 181 +++++++++++++----- pkg/backup/upload.go | 4 +- pkg/clickhouse/clickhouse.go | 5 +- pkg/storage/azblob.go | 16 +- pkg/storage/cos.go | 12 +- pkg/storage/ftp.go | 19 +- pkg/storage/gcs.go | 11 +- pkg/storage/s3.go | 13 +- pkg/storage/sftp.go | 21 +- pkg/storage/structs.go | 2 + test/integration/config-database-mapping.yml | 4 +- test/integration/config-s3-embedded-url.yml | 4 +- test/integration/config-s3-embedded.yml | 4 +- test/integration/config-s3-plain-embedded.yml | 4 +- test/integration/config-s3.yml | 4 +- test/integration/docker-compose.yml | 17 +- test/integration/docker-compose_advanced.yml | 17 +- test/integration/dynamic_settings.sh | 18 +- test/integration/integration_test.go | 46 ++++- test/integration/kopia/init.sh | 6 +- test/integration/restic/init.sh | 6 +- .../configs/backup/config.yml | 4 +- .../configs/backup/config.yml.origin | 4 +- .../docker-compose/docker-compose.yml | 4 +- .../clickhouse_backup/tests/cloud_storage.py | 5 +- 30 files changed, 364 insertions(+), 180 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index f0df9b1c..715f16fd 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -73,7 +73,7 @@ func WithBackupSharder(s backupSharder) BackuperOpt { } } -func (b *Backuper) init(ctx context.Context, disks []clickhouse.Disk, backupName string) error { +func (b *Backuper) initDisksPathdsAndBackupDestination(ctx context.Context, disks []clickhouse.Disk, backupName string) error { var err error if disks == nil { disks, err = b.ch.GetDisks(ctx, true) @@ -293,3 +293,15 @@ func (b *Backuper) buildEmbeddedLocationAZBLOB() string { url.Path = b.cfg.AzureBlob.AccountName return fmt.Sprintf("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s;BlobEndpoint=%s;", b.cfg.AzureBlob.EndpointSchema, b.cfg.AzureBlob.AccountName, b.cfg.AzureBlob.AccountKey, url.String()) } + +func (b *Backuper) getObjectDiskPath() (string, error) { + if b.cfg.General.RemoteStorage == "s3" { + return b.cfg.S3.ObjectDiskPath, nil + } else if b.cfg.General.RemoteStorage == "azblob" { + return b.cfg.AzureBlob.ObjectDiskPath, nil + } else if b.cfg.General.RemoteStorage == "gcs" { + return b.cfg.GCS.ObjectDiskPath, nil + } else { + return "", fmt.Errorf("cleanBackupObjectDisks: requesst object disks path but have unsupported remote_storage: %s", b.cfg.General.RemoteStorage) + } +} diff --git a/pkg/backup/create.go b/pkg/backup/create.go index dc4a8a86..10301b9a 100644 --- a/pkg/backup/create.go +++ 
b/pkg/backup/create.go @@ -402,10 +402,10 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName string, var disksToPartsMap map[string][]metadata.Part if doBackupData { if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - log.Debugf("calculate parts list `%s`.`%s` from embedded backup disk `%s`") + log.Debugf("calculate parts list `%s`.`%s` from embedded backup disk `%s`", table.Database, table.Name, b.cfg.ClickHouse.EmbeddedBackupDisk) disksToPartsMap, err = b.getPartsFromLocalEmbeddedBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) } else { - log.Debugf("calculate parts list `%s`.`%s` from embedded backup remote destination") + log.Debugf("calculate parts list `%s`.`%s` from embedded backup remote destination", table.Database, table.Name) disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(ctx, backupName, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], log) } } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index c907906d..9bc41944 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -127,25 +127,26 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis return err } hasObjectDisks := b.hasObjectDisksLocal(backupList, backupName, disks) - if hasObjectDisks { - bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, false, backupName) - if err != nil { - return err - } - err = bd.Connect(ctx) - if err != nil { - return fmt.Errorf("can't connect to remote storage: %v", err) - } - defer func() { - if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) - } - }() - b.dst = bd - } for _, backup := range backupList { if backup.BackupName == backupName { + b.isEmbedded = strings.Contains(backup.Tags, "embedded") + if hasObjectDisks || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "") { + bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, false, backupName) + if err != nil { + return err + } + err = bd.Connect(ctx) + if err != nil { + return fmt.Errorf("can't connect to remote storage: %v", err) + } + defer func() { + if err := bd.Close(ctx); err != nil { + b.log.Warnf("can't close BackupDestination error: %v", err) + } + }() + b.dst = bd + } err = b.cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx, backupName, disks, backup, hasObjectDisks, log) if err != nil { return err @@ -177,13 +178,13 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx con if err != nil { return err } - if !skip && (hasObjectDisks || (strings.Contains(backup.Tags, "embedded") && b.cfg.ClickHouse.EmbeddedBackupDisk == "")) { + if !skip && (hasObjectDisks || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "")) { if err = b.cleanBackupObjectDisks(ctx, backupName); err != nil { log.Warnf("b.cleanBackupObjectDisks return error: %v", err) return err } } - if !skip && strings.Contains(backup.Tags, "embedded") { + if !skip && (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "") { if err = b.cleanLocalEmbedded(ctx, backup, disks); err != nil { log.Warnf("b.cleanLocalEmbedded return error: %v", err) return err @@ -194,7 +195,7 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx con func (b *Backuper) hasObjectDisksLocal(backupList []LocalBackup, backupName string, disks []clickhouse.Disk) bool { for _, backup := range backupList { - if backup.BackupName == backupName && !strings.Contains(backup.Tags, "embedded") { + 
if backup.BackupName == backupName && !b.isEmbedded { for _, disk := range disks { if !disk.IsBackup && (b.isDiskTypeObject(disk.Type) || b.isDiskTypeEncryptedObject(disk, disks)) { backupExists, err := os.ReadDir(path.Join(disk.Path, "backup", backup.BackupName)) @@ -320,18 +321,24 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx context.Context, backup storage.Backup, log *apexLog.Entry) error { - if skip, err := b.skipIfSameLocalBackupPresent(ctx, backup.BackupName, backup.Tags); err != nil { + var skip bool + var err error + if skip, err = b.skipIfSameLocalBackupPresent(ctx, backup.BackupName, backup.Tags); err != nil { return err - } else if !skip { - if strings.Contains(backup.Tags, "embedded") { + } + if !skip { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { if err = b.cleanRemoteEmbedded(ctx, backup); err != nil { log.Warnf("b.cleanRemoteEmbedded return error: %v", err) return err } - } else if b.hasObjectDisksRemote(backup) { + return nil + } + if b.hasObjectDisksRemote(backup) || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "") { if err = b.cleanBackupObjectDisks(ctx, backup.BackupName); err != nil { log.Warnf("b.cleanBackupObjectDisks return error: %v", err) } + return nil } } return nil @@ -373,15 +380,9 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu // cleanBackupObjectDisks - recursive delete / func (b *Backuper) cleanBackupObjectDisks(ctx context.Context, backupName string) error { - var objectDiskPath string - if b.cfg.General.RemoteStorage == "s3" { - objectDiskPath = b.cfg.S3.ObjectDiskPath - } else if b.cfg.General.RemoteStorage == "azblob" { - objectDiskPath = b.cfg.AzureBlob.ObjectDiskPath - } else if b.cfg.General.RemoteStorage == "gcs" { - objectDiskPath = b.cfg.GCS.ObjectDiskPath - } else { - return fmt.Errorf("cleanBackupObjectDisks: %s, contains object disks but \"unsupported remote_storage: %s", backupName, b.cfg.General.RemoteStorage) + objectDiskPath, err := b.getObjectDiskPath() + if err != nil { + return err } //walk absolute path, delete relative return b.dst.WalkAbsolute(ctx, path.Join(objectDiskPath, backupName), true, func(ctx context.Context, f storage.RemoteFile) error { diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 551d4e18..d202ad7e 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -117,7 +117,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if b.cfg.General.RemoteStorage == "custom" { return custom.Download(ctx, b.cfg, backupName, tablePattern, partitions, schemaOnly) } - if err := b.init(ctx, disks, ""); err != nil { + if err := b.initDisksPathdsAndBackupDestination(ctx, disks, ""); err != nil { return err } defer func() { @@ -243,16 +243,14 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } } var rbacSize, configSize uint64 - if !b.isEmbedded { - rbacSize, err = b.downloadRBACData(ctx, remoteBackup) - if err != nil { - return fmt.Errorf("download RBAC error: %v", err) - } + rbacSize, err = b.downloadRBACData(ctx, remoteBackup) + if err != nil { + return fmt.Errorf("download RBAC error: %v", err) + } - configSize, err = b.downloadConfigData(ctx, remoteBackup) - if err != nil { - return fmt.Errorf("download CONFIGS error: %v", err) - } + configSize, err = b.downloadConfigData(ctx, remoteBackup) + if err != nil { + return fmt.Errorf("download CONFIGS error: %v", 
err) } backupMetadata := remoteBackup.BackupMetadata @@ -260,7 +258,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ backupMetadata.DataSize = dataSize backupMetadata.MetadataSize = metadataSize - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { localClickHouseBackupFile := path.Join(b.EmbeddedBackupDataPath, backupName, ".backup") remoteClickHouseBackupFile := path.Join(backupName, ".backup") if err = b.downloadSingleBackupFile(ctx, remoteClickHouseBackupFile, localClickHouseBackupFile, disks); err != nil { @@ -275,7 +273,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ backupMetadata.RBACSize = rbacSize backupMetafileLocalPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json") - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { backupMetafileLocalPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata.json") } if err := backupMetadata.Save(backupMetafileLocalPath); err != nil { @@ -415,7 +413,7 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, remoteMedataPrefix := path.Join(backupName, "metadata", common.TablePathEncode(tableTitle.Database), common.TablePathEncode(tableTitle.Table)) metadataFiles[fmt.Sprintf("%s.json", remoteMedataPrefix)] = path.Join(b.DefaultDataPath, "backup", backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) partitionsIdMap := make(map[metadata.TableTitle]common.EmptyMap) - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { metadataFiles[fmt.Sprintf("%s.sql", remoteMedataPrefix)] = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableTitle.Table))) metadataFiles[fmt.Sprintf("%s.json", remoteMedataPrefix)] = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) } diff --git a/pkg/backup/list.go b/pkg/backup/list.go index c6cb11cc..c83aea6c 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -111,6 +111,12 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku size = utils.FormatBytes(backup.CompressedSize + backup.MetadataSize) } description := backup.DataFormat + if backup.Tags != "" { + if description != "" { + description += ", " + } + description += backup.Tags + } creationDate := backup.CreationDate.Format("02/01/2006 15:04:05") if backup.Legacy { size = "???" 
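
The cleanup rule that the pkg/backup/delete.go changes above converge on is: when a local backup is removed and the same backup is not also kept remotely, data in remote object storage is cleaned either when the backup froze object disks or when it is an embedded backup written directly to remote storage (empty clickhouse->embedded_backup_disk); otherwise only the local embedded-disk copy is cleaned. A minimal sketch of that decision follows; the boolean arguments are hypothetical stand-ins for the fields and checks used in the hunks above, not code from this patch.

// Sketch: decide whether remote object-storage data of a local backup may be removed.
func shouldCleanRemoteObjectData(sameRemoteBackupExists, hasObjectDisks, isEmbedded bool, embeddedBackupDisk string) bool {
	if sameRemoteBackupExists {
		// the remote backup still references this data, keep it
		return false
	}
	// clean either when regular object disks were frozen into the backup,
	// or when the embedded backup was written directly to remote storage
	// (clickhouse->embedded_backup_disk is empty)
	return hasObjectDisks || (isEmbedded && embeddedBackupDisk == "")
}
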
diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index b42f4ce8..f8106ba5 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -7,8 +7,10 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/keeper" "github.com/Altinity/clickhouse-backup/v2/pkg/status" + "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/Altinity/clickhouse-backup/v2/pkg/storage/object_disk" "golang.org/x/sync/errgroup" + "io" "io/fs" "net/url" "os" @@ -84,14 +86,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err == nil && b.EmbeddedBackupDataPath != "" { backupMetafileLocalPaths = append(backupMetafileLocalPaths, path.Join(b.EmbeddedBackupDataPath, backupName, "metadata.json")) } else if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { - log.Warnf("%v", err) + b.EmbeddedBackupDataPath = b.DefaultDataPath } else if err != nil { return err } for _, metadataPath := range backupMetafileLocalPaths { backupMetadataBody, err = os.ReadFile(metadataPath) - if err == nil && b.EmbeddedBackupDataPath != "" { - b.isEmbedded = strings.HasPrefix(metadataPath, b.EmbeddedBackupDataPath) + if err == nil { break } } @@ -100,6 +101,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err := json.Unmarshal(backupMetadataBody, &backupMetadata); err != nil { return err } + b.isEmbedded = strings.Contains(backupMetadata.Tags, "embedded") if schemaOnly || doRestoreData { for _, database := range backupMetadata.Databases { @@ -138,14 +140,14 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } needRestart := false - if (rbacOnly || restoreRBAC) && !b.isEmbedded { + if rbacOnly || restoreRBAC { if err := b.restoreRBAC(ctx, backupName, disks); err != nil { return err } log.Infof("RBAC successfully restored") needRestart = true } - if (configsOnly || restoreConfigs) && !b.isEmbedded { + if configsOnly || restoreConfigs { if err := b.restoreConfigs(backupName, disks); err != nil { return err } @@ -162,6 +164,19 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } + if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { + if b.dst, err = storage.NewBackupDestination(ctx, b.cfg, b.ch, false, backupName); err != nil { + return err + } + if err = b.dst.Connect(ctx); err != nil { + return fmt.Errorf("restoreBackupEmbedded: can't connect to %s: %v", b.dst.Kind(), err) + } + defer func() { + if err := b.dst.Close(ctx); err != nil { + b.log.Warnf("can't close BackupDestination error: %v", err) + } + }() + } if schemaOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { if err := b.RestoreSchema(ctx, backupName, tablePattern, dropTable, ignoreDependencies); err != nil { return err @@ -439,7 +454,7 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern string, dropTable, ignoreDependencies bool) error { log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, - "operation": "restore", + "operation": "restore_schema", }) version, err := b.ch.GetVersion(ctx) @@ -447,7 +462,7 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s return err } metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk 
!= "" { metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") } info, err := os.Stat(metadataPath) @@ -486,7 +501,7 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s } var restoreErr error if b.isEmbedded { - restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore) + restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore, log) } else { restoreErr = b.restoreSchemaRegular(tablesForRestore, version, log) } @@ -500,9 +515,67 @@ var UUIDWithMergeTreeRE = regexp.MustCompile(`^(.+)(UUID)(\s+)'([^']+)'(.+)({uui var emptyReplicatedMergeTreeRE = regexp.MustCompile(`(?m)Replicated(MergeTree|ReplacingMergeTree|SummingMergeTree|AggregatingMergeTree|CollapsingMergeTree|VersionedCollapsingMergeTree|GraphiteMergeTree)\s*\(([^']*)\)(.*)`) -func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables) error { +func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, log *apexLog.Entry) error { + var err error + chVersion, err := b.ch.GetVersion(ctx) + if err != nil { + return err + } + if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + err = b.fixEmbeddedMetadataLocal(ctx, backupName, chVersion) + } else { + err = b.fixEmbeddedMetadataRemote(ctx, backupName, chVersion) + } + if err != nil { + return err + } + /*}*/ + return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) +} + +func (b *Backuper) fixEmbeddedMetadataRemote(ctx context.Context, backupName string, chVersion int) error { + objectDiskPath, err := b.getObjectDiskPath() + if err != nil { + return err + } + if walkErr := b.dst.WalkAbsolute(ctx, path.Join(objectDiskPath, backupName, "metadata"), true, func(ctx context.Context, fInfo storage.RemoteFile) error { + if err != nil { + return err + } + if !strings.HasSuffix(fInfo.Name(), ".sql") { + return nil + } + var fReader io.ReadCloser + remoteFilePath := path.Join(objectDiskPath, backupName, "metadata", fInfo.Name()) + fReader, err = b.dst.GetFileReaderAbsolute(ctx, path.Join(objectDiskPath, backupName, "metadata", fInfo.Name())) + if err != nil { + return err + } + var sqlBytes []byte + sqlBytes, err = io.ReadAll(fReader) + if err != nil { + return err + } + sqlQuery, sqlMetadataChanged, fixSqlErr := b.fixEmbeddedMetadataSQLQuery(ctx, sqlBytes, remoteFilePath, chVersion) + if fixSqlErr != nil { + return fixSqlErr + } + if sqlMetadataChanged { + err = b.dst.PutFileAbsolute(ctx, remoteFilePath, io.NopCloser(strings.NewReader(sqlQuery))) + if err != nil { + return err + } + } + return nil + }); walkErr != nil { + return walkErr + } + return nil +} + +func (b *Backuper) fixEmbeddedMetadataLocal(ctx context.Context, backupName string, chVersion int) error { metadataPath := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") - if err := filepath.Walk(metadataPath, func(filePath string, info fs.FileInfo, err error) error { + if walkErr := filepath.Walk(metadataPath, func(filePath string, info fs.FileInfo, err error) error { if err != nil { return err } @@ -517,23 +590,11 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, if err != nil { return err } - sqlQuery := string(sqlBytes) - if strings.Contains(sqlQuery, "{uuid}") { - if UUIDWithMergeTreeRE.Match(sqlBytes) { - sqlQuery = UUIDWithMergeTreeRE.ReplaceAllString(sqlQuery, "$1$2$3'$4'$5$4$7") - } else { - apexLog.Warnf("%s contains `{uuid}` macro, but not contains UUID in table definition, will replace to 
`{database}/{table}` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath) - filePathParts := strings.Split(filePath, "/") - database, err := url.QueryUnescape(filePathParts[len(filePathParts)-3]) - if err != nil { - return err - } - table, err := url.QueryUnescape(filePathParts[len(filePathParts)-2]) - if err != nil { - return err - } - sqlQuery = strings.Replace(sqlQuery, "{uuid}", database+"/"+table, 1) - } + sqlQuery, sqlMetadataChanged, fixSqlErr := b.fixEmbeddedMetadataSQLQuery(ctx, sqlBytes, filePath, chVersion) + if fixSqlErr != nil { + return fixSqlErr + } + if sqlMetadataChanged { if err = object_disk.WriteFileContent(ctx, b.ch, b.cfg, b.cfg.ClickHouse.EmbeddedBackupDisk, filePath, []byte(sqlQuery)); err != nil { return err } @@ -543,28 +604,58 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, return err } } - if emptyReplicatedMergeTreeRE.MatchString(sqlQuery) { - replicaXMLSettings := map[string]string{"default_replica_path": "//default_replica_path", "default_replica_name": "//default_replica_name"} - settings, err := b.ch.GetPreprocessedXMLSettings(ctx, replicaXMLSettings, "config.xml") + return nil + }); walkErr != nil { + return walkErr + } + return nil +} + +func (b *Backuper) fixEmbeddedMetadataSQLQuery(ctx context.Context, sqlBytes []byte, filePath string, version int) (string, bool, error) { + sqlQuery := string(sqlBytes) + sqlMetadataChanged := false + if strings.Contains(sqlQuery, "{uuid}") { + if UUIDWithMergeTreeRE.Match(sqlBytes) && version < 23009000 { + sqlQuery = UUIDWithMergeTreeRE.ReplaceAllString(sqlQuery, "$1$2$3'$4'$5$4$7") + } else { + apexLog.Warnf("%s contains `{uuid}` macro, will replace to `{database}/{table}` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath) + filePathParts := strings.Split(filePath, "/") + database, err := url.QueryUnescape(filePathParts[len(filePathParts)-3]) if err != nil { - return err + return "", false, err } - if len(settings) != 2 { - apexLog.Fatalf("can't get %#v from preprocessed_configs/config.xml", replicaXMLSettings) + table, err := url.QueryUnescape(filePathParts[len(filePathParts)-2]) + if err != nil { + return "", false, err } - apexLog.Warnf("%s contains `ReplicatedMergeTree()` without parameters, will replace to '%s` and `%s` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath, settings["default_replica_path"], settings["default_replica_name"]) - matches := emptyReplicatedMergeTreeRE.FindStringSubmatch(sqlQuery) - substitution := fmt.Sprintf("$1$2('%s','%s')$4", settings["default_replica_path"], settings["default_replica_name"]) - if matches[2] != "" { - substitution = fmt.Sprintf("$1$2('%s','%s',$3)$4", settings["default_replica_path"], settings["default_replica_name"]) + lastIndex := strings.LastIndex(sqlQuery, "{uuid}") + sqlQuery = sqlQuery[:lastIndex] + strings.Replace(sqlQuery[lastIndex:], "{uuid}", database+"/"+table, 1) + // create materialized view corner case + if strings.Contains(sqlQuery, "{uuid}") { + sqlQuery = UUIDWithMergeTreeRE.ReplaceAllString(sqlQuery, "$1$2$3'$4'$5$4$7") } - sqlQuery = emptyReplicatedMergeTreeRE.ReplaceAllString(sqlQuery, substitution) } - return nil - }); err != nil { - return err + sqlMetadataChanged = true } - return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) + if emptyReplicatedMergeTreeRE.MatchString(sqlQuery) { + replicaXMLSettings := map[string]string{"default_replica_path": "//default_replica_path", "default_replica_name": 
"//default_replica_name"} + settings, err := b.ch.GetPreprocessedXMLSettings(ctx, replicaXMLSettings, "config.xml") + if err != nil { + return "", false, err + } + if len(settings) != 2 { + apexLog.Fatalf("can't get %#v from preprocessed_configs/config.xml", replicaXMLSettings) + } + apexLog.Warnf("%s contains `ReplicatedMergeTree()` without parameters, will replace to '%s` and `%s` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath, settings["default_replica_path"], settings["default_replica_name"]) + matches := emptyReplicatedMergeTreeRE.FindStringSubmatch(sqlQuery) + substitution := fmt.Sprintf("$1$2('%s','%s')$4", settings["default_replica_path"], settings["default_replica_name"]) + if matches[2] != "" { + substitution = fmt.Sprintf("$1$2('%s','%s',$3)$4", settings["default_replica_path"], settings["default_replica_name"]) + } + sqlQuery = emptyReplicatedMergeTreeRE.ReplaceAllString(sqlQuery, substitution) + sqlMetadataChanged = true + } + return sqlQuery, sqlMetadataChanged, nil } func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, log *apexLog.Entry) error { @@ -690,7 +781,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt startRestore := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, - "operation": "restore", + "operation": "restore_data", }) if b.ch.IsClickhouseShadow(path.Join(b.DefaultDataPath, "backup", backupName, "shadow")) { return fmt.Errorf("backups created in v0.0.1 is not supported now") @@ -714,7 +805,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt var tablesForRestore ListOfTables var partitionsNameList map[metadata.TableTitle][]string metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") } if backup.Legacy { diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 414d70a5..d9dc9f94 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -64,7 +64,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if _, disks, err = b.getLocalBackup(ctx, backupName, nil); err != nil { return fmt.Errorf("can't find local backup: %v", err) } - if err := b.init(ctx, disks, backupName); err != nil { + if err := b.initDisksPathdsAndBackupDestination(ctx, disks, backupName); err != nil { return err } defer func() { @@ -215,7 +215,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return fmt.Errorf("can't upload %s: %v", remoteBackupMetaFile, err) } } - if b.isEmbedded { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { localClickHouseBackupFile := path.Join(b.EmbeddedBackupDataPath, backupName, ".backup") remoteClickHouseBackupFile := path.Join(backupName, ".backup") if err = b.uploadSingleBackupFile(ctx, localClickHouseBackupFile, remoteClickHouseBackupFile); err != nil { diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index e0050bfa..83d20c7a 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -181,12 +181,9 @@ func (ch *ClickHouse) GetDisks(ctx context.Context, enrich bool) ([]Disk, error) } func (ch *ClickHouse) GetEmbeddedBackupPath(disks []Disk) (string, error) { - if !ch.Config.UseEmbeddedBackupRestore { + if !ch.Config.UseEmbeddedBackupRestore || ch.Config.EmbeddedBackupDisk == "" { return "", 
nil } - if ch.Config.EmbeddedBackupDisk == "" { - return "", fmt.Errorf("please setup `clickhouse->embedded_backup_disk` in config or CLICKHOUSE_EMBEDDED_BACKUP_DISK environment variable") - } for _, d := range disks { if d.Name == ch.Config.EmbeddedBackupDisk { return d.Path, nil diff --git a/pkg/storage/azblob.go b/pkg/storage/azblob.go index 8c118b84..300bfcc1 100644 --- a/pkg/storage/azblob.go +++ b/pkg/storage/azblob.go @@ -162,8 +162,12 @@ func (a *AzureBlob) Close(ctx context.Context) error { } func (a *AzureBlob) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - a.logf("AZBLOB->GetFileReader %s", key) - blob := a.Container.NewBlockBlobURL(path.Join(a.Config.Path, key)) + return a.GetFileReaderAbsolute(ctx, path.Join(a.Config.Path, key)) +} + +func (a *AzureBlob) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { + a.logf("AZBLOB->GetFileReaderAbsolute %s", key) + blob := a.Container.NewBlockBlobURL(key) r, err := blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, a.CPK) if err != nil { return nil, err @@ -176,8 +180,12 @@ func (a *AzureBlob) GetFileReaderWithLocalPath(ctx context.Context, key, _ strin } func (a *AzureBlob) PutFile(ctx context.Context, key string, r io.ReadCloser) error { - a.logf("AZBLOB->PutFile %s", key) - blob := a.Container.NewBlockBlobURL(path.Join(a.Config.Path, key)) + return a.PutFileAbsolute(ctx, path.Join(a.Config.Path, key), r) +} + +func (a *AzureBlob) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { + a.logf("AZBLOB->PutFileAbsolute %s", key) + blob := a.Container.NewBlockBlobURL(key) bufferSize := a.Config.BufferSize // Configure the size of the rotating buffers that are used when uploading maxBuffers := a.Config.MaxBuffers // Configure the number of rotating buffers that are used when uploading _, err := x.UploadStreamToBlockBlob(ctx, r, blob, azblob.UploadStreamToBlockBlobOptions{BufferSize: bufferSize, MaxBuffers: maxBuffers}, a.CPK) diff --git a/pkg/storage/cos.go b/pkg/storage/cos.go index 8f78dcdd..d0a953c7 100644 --- a/pkg/storage/cos.go +++ b/pkg/storage/cos.go @@ -136,7 +136,11 @@ func (c *COS) WalkAbsolute(ctx context.Context, prefix string, recursive bool, p } func (c *COS) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - resp, err := c.client.Object.Get(ctx, path.Join(c.Config.Path, key), nil) + return c.GetFileReaderAbsolute(ctx, path.Join(c.Config.Path, key)) +} + +func (c *COS) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { + resp, err := c.client.Object.Get(ctx, key, nil) if err != nil { return nil, err } @@ -148,7 +152,11 @@ func (c *COS) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io } func (c *COS) PutFile(ctx context.Context, key string, r io.ReadCloser) error { - _, err := c.client.Object.Put(ctx, path.Join(c.Config.Path, key), r, nil) + return c.PutFileAbsolute(ctx, path.Join(c.Config.Path, key), r) +} + +func (c *COS) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { + _, err := c.client.Object.Put(ctx, key, r, nil) return err } diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go index 2d8efa5e..0f720fd1 100644 --- a/pkg/storage/ftp.go +++ b/pkg/storage/ftp.go @@ -127,6 +127,7 @@ func (f *FTP) Walk(ctx context.Context, ftpPath string, recursive bool, process prefix := path.Join(f.Config.Path, ftpPath) return f.WalkAbsolute(ctx, prefix, recursive, process) } + func (f *FTP) WalkAbsolute(ctx context.Context, prefix string, 
recursive bool, process func(context.Context, RemoteFile) error) error { client, err := f.getConnectionFromPool(ctx, "Walk") if err != nil { @@ -178,12 +179,15 @@ func (f *FTP) WalkAbsolute(ctx context.Context, prefix string, recursive bool, p } func (f *FTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - f.Log.Debugf("GetFileReader key=%s", key) + return f.GetFileReaderAbsolute(ctx, path.Join(f.Config.Path, key)) +} +func (f *FTP) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { + f.Log.Debugf("GetFileReaderAbsolute key=%s", key) client, err := f.getConnectionFromPool(ctx, "GetFileReader") if err != nil { return nil, err } - resp, err := client.Retr(path.Join(f.Config.Path, key)) + resp, err := client.Retr(key) return &FTPFileReader{ Response: resp, pool: f, @@ -197,18 +201,21 @@ func (f *FTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io } func (f *FTP) PutFile(ctx context.Context, key string, r io.ReadCloser) error { - f.Log.Debugf("PutFile key=%s", key) + return f.PutFileAbsolute(ctx, path.Join(f.Config.Path, key), r) +} + +func (f *FTP) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { + f.Log.Debugf("PutFileAbsolute key=%s", key) client, err := f.getConnectionFromPool(ctx, "PutFile") defer f.returnConnectionToPool(ctx, "PutFile", client) if err != nil { return err } - k := path.Join(f.Config.Path, key) - err = f.MkdirAll(path.Dir(k), client) + err = f.MkdirAll(path.Dir(key), client) if err != nil { return err } - return client.Stor(k, r) + return client.Stor(key, r) } func (f *FTP) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, dstKey string) (int64, error) { diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 10d154d8..54f68343 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -226,13 +226,17 @@ func (gcs *GCS) WalkAbsolute(ctx context.Context, rootPath string, recursive boo } func (gcs *GCS) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { + return gcs.GetFileReaderAbsolute(ctx, path.Join(gcs.Config.Path, key)) +} + +func (gcs *GCS) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { apexLog.Errorf("gcs.GetFileReader: gcs.clientPool.BorrowObject error: %+v", err) return nil, err } pClient := pClientObj.(*clientObject).Client - obj := pClient.Bucket(gcs.Config.Bucket).Object(path.Join(gcs.Config.Path, key)) + obj := pClient.Bucket(gcs.Config.Bucket).Object(key) reader, err := obj.NewReader(ctx) if err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { @@ -251,13 +255,16 @@ func (gcs *GCS) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) ( } func (gcs *GCS) PutFile(ctx context.Context, key string, r io.ReadCloser) error { + return gcs.PutFileAbsolute(ctx, path.Join(gcs.Config.Path, key), r) +} + +func (gcs *GCS) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { apexLog.Errorf("gcs.PutFile: gcs.clientPool.BorrowObject error: %+v", err) return err } pClient := pClientObj.(*clientObject).Client - key = path.Join(gcs.Config.Path, key) obj := pClient.Bucket(gcs.Config.Bucket).Object(key) writer := obj.NewWriter(ctx) writer.StorageClass = gcs.Config.StorageClass diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index eebba9d9..62effbf8 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -195,9 
+195,13 @@ func (s *S3) Close(ctx context.Context) error { } func (s *S3) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { + return s.GetFileReaderAbsolute(ctx, path.Join(s.Config.Path, key)) +} + +func (s *S3) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { params := &s3.GetObjectInput{ Bucket: aws.String(s.Config.Bucket), - Key: aws.String(path.Join(s.Config.Path, key)), + Key: aws.String(key), } s.enrichGetObjectParams(params) resp, err := s.client.GetObject(ctx, params) @@ -266,9 +270,13 @@ func (s *S3) GetFileReaderWithLocalPath(ctx context.Context, key, localPath stri } func (s *S3) PutFile(ctx context.Context, key string, r io.ReadCloser) error { + return s.PutFileAbsolute(ctx, path.Join(s.Config.Path, key), r) +} + +func (s *S3) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { params := s3.PutObjectInput{ Bucket: aws.String(s.Config.Bucket), - Key: aws.String(path.Join(s.Config.Path, key)), + Key: aws.String(key), Body: r, StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), } @@ -395,6 +403,7 @@ func (s *S3) Walk(ctx context.Context, s3Path string, recursive bool, process fu prefix := path.Join(s.Config.Path, s3Path) return s.WalkAbsolute(ctx, prefix, recursive, process) } + func (s *S3) WalkAbsolute(ctx context.Context, prefix string, recursive bool, process func(ctx context.Context, r RemoteFile) error) error { g, ctx := errgroup.WithContext(ctx) s3Files := make(chan *s3File) diff --git a/pkg/storage/sftp.go b/pkg/storage/sftp.go index 5f179953..7bad272e 100644 --- a/pkg/storage/sftp.go +++ b/pkg/storage/sftp.go @@ -169,6 +169,7 @@ func (sftp *SFTP) Walk(ctx context.Context, remotePath string, recursive bool, p prefix := path.Join(sftp.Config.Path, remotePath) return sftp.WalkAbsolute(ctx, prefix, recursive, process) } + func (sftp *SFTP) WalkAbsolute(ctx context.Context, prefix string, recursive bool, process func(context.Context, RemoteFile) error) error { sftp.Debug("[SFTP_DEBUG] Walk %s, recursive=%v", prefix, recursive) @@ -213,8 +214,11 @@ func (sftp *SFTP) WalkAbsolute(ctx context.Context, prefix string, recursive boo } func (sftp *SFTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - filePath := path.Join(sftp.Config.Path, key) - return sftp.sftpClient.OpenFile(filePath, syscall.O_RDWR) + return sftp.GetFileReaderAbsolute(ctx, path.Join(sftp.Config.Path, key)) +} + +func (sftp *SFTP) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { + return sftp.sftpClient.OpenFile(key, syscall.O_RDWR) } func (sftp *SFTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io.ReadCloser, error) { @@ -222,17 +226,20 @@ func (sftp *SFTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) } func (sftp *SFTP) PutFile(ctx context.Context, key string, localFile io.ReadCloser) error { - filePath := path.Join(sftp.Config.Path, key) - if err := sftp.sftpClient.MkdirAll(path.Dir(filePath)); err != nil { - log.Warnf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(filePath), err) + return sftp.PutFileAbsolute(ctx, path.Join(sftp.Config.Path, key), localFile) +} + +func (sftp *SFTP) PutFileAbsolute(ctx context.Context, key string, localFile io.ReadCloser) error { + if err := sftp.sftpClient.MkdirAll(path.Dir(key)); err != nil { + log.Warnf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(key), err) } - remoteFile, err := sftp.sftpClient.Create(filePath) + remoteFile, err := sftp.sftpClient.Create(key) if err != nil { 
return err } defer func() { if err := remoteFile.Close(); err != nil { - log.Warnf("can't close %s err=%v", filePath, err) + log.Warnf("can't close %s err=%v", key, err) } }() if _, err = remoteFile.ReadFrom(localFile); err != nil { diff --git a/pkg/storage/structs.go b/pkg/storage/structs.go index fa7782ca..f90cf0eb 100644 --- a/pkg/storage/structs.go +++ b/pkg/storage/structs.go @@ -30,7 +30,9 @@ type RemoteStorage interface { Walk(ctx context.Context, prefix string, recursive bool, fn func(context.Context, RemoteFile) error) error WalkAbsolute(ctx context.Context, absolutePrefix string, recursive bool, fn func(context.Context, RemoteFile) error) error GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) + GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) GetFileReaderWithLocalPath(ctx context.Context, key, localPath string) (io.ReadCloser, error) PutFile(ctx context.Context, key string, r io.ReadCloser) error + PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, dstKey string) (int64, error) } diff --git a/test/integration/config-database-mapping.yml b/test/integration/config-database-mapping.yml index d73b2ee9..1ae1eb4c 100644 --- a/test/integration/config-database-mapping.yml +++ b/test/integration/config-database-mapping.yml @@ -17,8 +17,8 @@ clickhouse: timeout: 1s restart_command: bash -c 'echo "FAKE RESTART"' s3: - access_key: access-key - secret_key: it-is-my-super-secret-key + access_key: access_key + secret_key: it_is_my_super_secret_key bucket: clickhouse endpoint: http://minio:9000 acl: private diff --git a/test/integration/config-s3-embedded-url.yml b/test/integration/config-s3-embedded-url.yml index 3c7f7dba..2af8c54a 100644 --- a/test/integration/config-s3-embedded-url.yml +++ b/test/integration/config-s3-embedded-url.yml @@ -22,8 +22,8 @@ clickhouse: use_embedded_backup_restore: true embedded_backup_disk: "" s3: - access_key: access-key - secret_key: it-is-my-super-secret-key + access_key: access_key + secret_key: it_is_my_super_secret_key bucket: clickhouse endpoint: http://minio:9000 acl: private diff --git a/test/integration/config-s3-embedded.yml b/test/integration/config-s3-embedded.yml index 3ff7061f..5d59d8d7 100644 --- a/test/integration/config-s3-embedded.yml +++ b/test/integration/config-s3-embedded.yml @@ -22,8 +22,8 @@ clickhouse: use_embedded_backup_restore: true embedded_backup_disk: backups_s3 s3: - access_key: access-key - secret_key: it-is-my-super-secret-key + access_key: access_key + secret_key: it_is_my_super_secret_key bucket: clickhouse endpoint: http://minio:9000 acl: private diff --git a/test/integration/config-s3-plain-embedded.yml b/test/integration/config-s3-plain-embedded.yml index 3060aea1..ea986797 100644 --- a/test/integration/config-s3-plain-embedded.yml +++ b/test/integration/config-s3-plain-embedded.yml @@ -22,8 +22,8 @@ clickhouse: use_embedded_backup_restore: true embedded_backup_disk: backups_s3_plain s3: - access_key: access-key - secret_key: it-is-my-super-secret-key + access_key: access_key + secret_key: it_is_my_super_secret_key bucket: clickhouse endpoint: http://minio:9000 acl: private diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml index ab49bdf0..c4773eac 100644 --- a/test/integration/config-s3.yml +++ b/test/integration/config-s3.yml @@ -25,8 +25,8 @@ clickhouse: # restart_command: bash -c 'echo "FAKE RESTART"' backup_mutations: true s3: - access_key: access-key - 
secret_key: it-is-my-super-secret-key + access_key: access_key + secret_key: it_is_my_super_secret_key bucket: clickhouse endpoint: http://minio:9000 acl: private diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 95470526..c28ccd5d 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -28,11 +28,11 @@ services: image: docker.io/bitnami/minio:${MINIO_VERSION:-latest} container_name: minio environment: - MINIO_ACCESS_KEY: access-key - MINIO_SECRET_KEY: it-is-my-super-secret-key + MINIO_ACCESS_KEY: access_key + MINIO_SECRET_KEY: it_is_my_super_secret_key MINIO_DEFAULT_BUCKETS: 'clickhouse' - MINIO_ROOT_USER: access-key - MINIO_ROOT_PASSWORD: it-is-my-super-secret-key + MINIO_ROOT_USER: access_key + MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: test: curl -sL http://localhost:9000/ interval: 10s @@ -134,8 +134,8 @@ services: QA_AWS_BUCKET: ${QA_AWS_BUCKET} QA_AWS_REGION: ${QA_AWS_REGION} # https://github.com/Altinity/clickhouse-backup/issues/691: - AWS_ACCESS_KEY_ID: access-key - AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + AWS_ACCESS_KEY_ID: access_key + AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key volumes_from: - clickhouse ports: @@ -156,6 +156,7 @@ services: user: root environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-1.1.54394} + CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS: "true" TZ: UTC LOG_LEVEL: "${LOG_LEVEL:-info}" S3_DEBUG: "${S3_DEBUG:-false}" @@ -171,8 +172,8 @@ services: QA_AWS_BUCKET: ${QA_AWS_BUCKET} QA_AWS_REGION: ${QA_AWS_REGION} # https://github.com/Altinity/clickhouse-backup/issues/691: - AWS_ACCESS_KEY_ID: access-key - AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + AWS_ACCESS_KEY_ID: access_key + AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key # GCS over S3 object disk QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 94b18fbe..a34ed174 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -40,11 +40,11 @@ services: image: docker.io/bitnami/minio:${MINIO_VERSION:-latest} container_name: minio environment: - MINIO_ACCESS_KEY: access-key - MINIO_SECRET_KEY: it-is-my-super-secret-key + MINIO_ACCESS_KEY: access_key + MINIO_SECRET_KEY: it_is_my_super_secret_key MINIO_DEFAULT_BUCKETS: 'clickhouse' - MINIO_ROOT_USER: access-key - MINIO_ROOT_PASSWORD: it-is-my-super-secret-key + MINIO_ROOT_USER: access_key + MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: test: curl -sL http://localhost:9000/ interval: 10s @@ -185,8 +185,8 @@ services: QA_AWS_BUCKET: ${QA_AWS_BUCKET} QA_AWS_REGION: ${QA_AWS_REGION} # https://github.com/Altinity/clickhouse-backup/issues/691: - AWS_ACCESS_KEY_ID: access-key - AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + AWS_ACCESS_KEY_ID: access_key + AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key volumes_from: - clickhouse ports: @@ -207,6 +207,7 @@ services: user: root environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-19.17} + CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS: "true" TZ: UTC LOG_LEVEL: "${LOG_LEVEL:-info}" S3_DEBUG: "${S3_DEBUG:-false}" @@ -222,8 +223,8 @@ services: QA_AWS_BUCKET: ${QA_AWS_BUCKET} QA_AWS_REGION: ${QA_AWS_REGION} # https://github.com/Altinity/clickhouse-backup/issues/691: - AWS_ACCESS_KEY_ID: access-key - AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + AWS_ACCESS_KEY_ID: 
access_key + AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key # GCS over S3 object disk QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh index 12164000..ada3f6d5 100755 --- a/test/integration/dynamic_settings.sh +++ b/test/integration/dynamic_settings.sh @@ -44,7 +44,7 @@ cat < /etc/clickhouse-server/config.d/storage_configuration.xml EOT -if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.1[0-9] || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then +if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.1[0-9] || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then if [[ ! -d /hdd3_data ]]; then mkdir -pv /hdd3_data @@ -93,8 +93,8 @@ cat < /etc/clickhouse-server/config.d/storage_configuration_s3.xml s3 http://minio:9000/clickhouse/disk_s3/{cluster}/{shard}/ 1 @@ -170,8 +170,8 @@ cat < /etc/clickhouse-server/config.d/storage_configuration_encrypted_s3.x s3 http://minio:9000/clickhouse/disk_s3/ 1 @@ -220,8 +220,8 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml s3 http://minio:9000/clickhouse/backups_s3/{cluster}/{shard}/ 1 false @@ -277,8 +277,8 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plai s3_plain http://minio:9000/clickhouse/backups_s3_plain/{cluster}/{shard}/ 1 false diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 9bde954f..ddbf6d0f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2021,7 +2021,7 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) backupDir := "/var/lib/clickhouse/backup" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasPrefix(remoteStorageType, "_URL") { + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) } out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) @@ -2106,7 +2106,7 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st } func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorageType string) { - if remoteStorageType == "AZBLOB" { + if remoteStorageType == "AZBLOB" || remoteStorageType == "AZBLOB_EMBEDDED_URL" { t.Log("wait when resolve https://github.com/Azure/Azurite/issues/2362") /* r.NoError(dockerExec("azure", "apk", "add", "jq")) @@ -2137,7 +2137,7 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag r.NoError(err) r.Equal(expected, strings.Trim(out, "\r\n\t ")) } - if remoteStorageType == "S3" { + if remoteStorageType == "S3" || remoteStorageType == "S3_EMBEDDED_URL" { checkRemoteDir("total 0", "minio", "bash", "-c", "ls -lh /bitnami/minio/data/clickhouse/") } if remoteStorageType == "SFTP" { @@ -2213,28 +2213,44 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", 
"--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" + // embedded storage with embedded disks contain object disk files and will download additional data parts if strings.HasPrefix(remoteStorageType, "EMBEDDED") { fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/" + dbName + "/t?" } + // embedded storage without embedded disks doesn't contain `shadow` and contain only `metadata` + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { + fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/metadata/" + dbName + "/t?.json" + } out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") r.NoError(err) expectedLines := "13" // custom storage doesn't support --partitions for upload / download now - // embedded storage contain hardLink files and will download additional data parts + // embedded storage with embedded disks contain hardLink files and will download additional data parts if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { expectedLines = "17" } + // embedded storage without embedded disks doesn't contain `shadow` and contain only `metadata` + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { + expectedLines = "2" + } r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) + expectedLines = "17" fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" + // embedded storage with embedded disks contain hardLink files and will download additional data parts if strings.HasPrefix(remoteStorageType, "EMBEDDED") { fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/" + dbName + "/t?" 
} + // embedded storage without embedded disks doesn't contain `shadow` and contain only `metadata` + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { + fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/metadata/" + dbName + "/t?.json" + expectedLines = "2" + } out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) - r.Equal("17", strings.Trim(out, "\r\n\t ")) + r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) result = 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) @@ -2249,24 +2265,36 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test // check create + partitions r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) + expectedLines = "5" partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/" + dbName + "/t1" } + //embedded backup without disk have only local metadata + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { + partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/metadata/" + dbName + "/t?.json" + expectedLines = "1" + } out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") r.NoError(err) - r.Equal("5", strings.Trim(out, "\r\n\t ")) + r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) // check create > upload + partitions r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName)) partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + expectedLines = "7" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/" + dbName + "/t1" } + //embedded backup without disk have only local metadata + if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { + partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/metadata/" + dbName + "/t?.json" + expectedLines = "1" + } out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") r.NoError(err) - r.Equal("7", strings.Trim(out, "\r\n\t ")) + 
r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=0-20220102,0-20220103", partitionBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) diff --git a/test/integration/kopia/init.sh b/test/integration/kopia/init.sh index 7af33791..e00dfd61 100755 --- a/test/integration/kopia/init.sh +++ b/test/integration/kopia/init.sh @@ -3,12 +3,12 @@ export KOPIA_PASSWORD_FILE="${CUR_DIR}/password" export KOPIA_S3_BUCKET=clickhouse export KOPIA_S3_PATH=/clickhouse/kopia/cluster_name/shard_number/ export KOPIA_S3_ENDPOINT=minio:9000 -export AWS_ACCESS_KEY_ID=access-key -export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key +export AWS_ACCESS_KEY_ID=access_key +export AWS_SECRET_ACCESS_KEY=it_is_my_super_secret_key export KOPIA_KEEP_LAST=7 export KOPIA_PASSWORD=kopia-repo-password export KOPIA_CHECK_FOR_UPDATES=false export CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-custom-kopia.yml export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' ${CLICKHOUSE_BACKUP_CONFIG})' --port '$(yq '.clickhouse.port' ${CLICKHOUSE_BACKUP_CONFIG})' --user '$(yq '.clickhouse.username' ${CLICKHOUSE_BACKUP_CONFIG})' --password '$(yq '.clickhouse.password' ${CLICKHOUSE_BACKUP_CONFIG})'" kopia repository connect s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --prefix=${KOPIA_S3_PATH} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} || kopia repository create s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --prefix=${KOPIA_S3_PATH} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} -kopia policy set --global --keep-latest=${KOPIA_KEEP_LAST} \ No newline at end of file +kopia policy set --global --keep-latest=${KOPIA_KEEP_LAST} diff --git a/test/integration/restic/init.sh b/test/integration/restic/init.sh index 7859fbf7..5a520000 100755 --- a/test/integration/restic/init.sh +++ b/test/integration/restic/init.sh @@ -1,9 +1,9 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" export RESTIC_PASSWORD_FILE="${CUR_DIR}/password" export RESTIC_REPOSITORY=s3:http://minio:9000/clickhouse/restic/cluster_name/shard_number -export AWS_ACCESS_KEY_ID=access-key -export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key +export AWS_ACCESS_KEY_ID=access_key +export AWS_SECRET_ACCESS_KEY=it_is_my_super_secret_key export RESTIC_KEEP_LAST=7 export CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-custom-restic.yml export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' ${CLICKHOUSE_BACKUP_CONFIG})' --port '$(yq '.clickhouse.port' ${CLICKHOUSE_BACKUP_CONFIG})' --user '$(yq '.clickhouse.username' ${CLICKHOUSE_BACKUP_CONFIG})' --password '$(yq '.clickhouse.password' ${CLICKHOUSE_BACKUP_CONFIG})'" -restic cat config > /dev/null || restic init \ No newline at end of file +restic cat config > /dev/null || restic init diff --git a/test/testflows/clickhouse_backup/configs/backup/config.yml b/test/testflows/clickhouse_backup/configs/backup/config.yml index 1ebce557..519607da 100644 --- a/test/testflows/clickhouse_backup/configs/backup/config.yml +++ b/test/testflows/clickhouse_backup/configs/backup/config.yml @@ -75,14 +75,14 @@ general: remote_storage: none upload_concurrency: 2 s3: - access_key: access-key + access_key: access_key + 
secret_key: it_is_my_super_secret_key acl: private bucket: altinity-qa-test debug: false disable_ssl: true endpoint: http://minio:9000 force_path_style: true - secret_key: it-is-my-super-secret-key sftp: address: sftp_server compression_format: tar diff --git a/test/testflows/clickhouse_backup/configs/backup/config.yml.origin b/test/testflows/clickhouse_backup/configs/backup/config.yml.origin index 1ebce557..519607da 100644 --- a/test/testflows/clickhouse_backup/configs/backup/config.yml.origin +++ b/test/testflows/clickhouse_backup/configs/backup/config.yml.origin @@ -75,14 +75,14 @@ general: remote_storage: none upload_concurrency: 2 s3: - access_key: access-key + access_key: access_key + secret_key: it_is_my_super_secret_key acl: private bucket: altinity-qa-test debug: false disable_ssl: true endpoint: http://minio:9000 force_path_style: true - secret_key: it-is-my-super-secret-key sftp: address: sftp_server compression_format: tar diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index 353708c1..74cbbc53 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -182,8 +182,8 @@ services: image: minio/minio:${MINIO_VERSION:-latest} hostname: minio environment: - MINIO_ACCESS_KEY: access-key - MINIO_SECRET_KEY: it-is-my-super-secret-key + MINIO_ACCESS_KEY: access_key + MINIO_SECRET_KEY: it_is_my_super_secret_key entrypoint: sh command: -c 'mkdir -p doc_gen_minio/export/clickhouse && minio server doc_gen_minio/export' healthcheck: diff --git a/test/testflows/clickhouse_backup/tests/cloud_storage.py b/test/testflows/clickhouse_backup/tests/cloud_storage.py index c86922c1..f5449358 100644 --- a/test/testflows/clickhouse_backup/tests/cloud_storage.py +++ b/test/testflows/clickhouse_backup/tests/cloud_storage.py @@ -1,4 +1,5 @@ import os + from clickhouse_backup.requirements.requirements import * from clickhouse_backup.tests.common import * from clickhouse_backup.tests.steps import * @@ -166,8 +167,8 @@ def s3_minio(self): fields_to_modify={ "general": {"remote_storage": "s3"}, "s3": { - "access_key": "access-key", - "secret_key": "it-is-my-super-secret-key", + "access_key": "access_key", + "secret_key": "it_is_my_super_secret_key", "endpoint": "http://minio:9000", "disable_ssl": True, "region": "us-west-2", "bucket": "clickhouse" From e87e23fd5524729051894c3b2c63ccae4979354d Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 27 Feb 2024 20:08:02 +0500 Subject: [PATCH 05/80] BACKUP/RESTORE FROM/TO S3() pass test, but BACKUP/RESTORE FROM/TO AzureBlobStorage(), still not work, clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447 --- test/integration/integration_test.go | 44 ++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index ddbf6d0f..c83a4870 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -542,12 +542,12 @@ func TestRBAC(t *testing.T) { ch.chbackend.Close() } testRBACScenario("/etc/clickhouse-backup/config-s3.yml") - if chVersion == "head" || compareVersion(chVersion, "24.1") >= 0 { + if compareVersion(chVersion, "24.1") >= 0 { testRBACScenario("/etc/clickhouse-backup/config-s3-embedded.yml") 
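		// The *-embedded-url.yml configs keep `use_embedded_backup_restore: true` but set
		// `embedded_backup_disk: ""`, so, roughly speaking, the embedded backup destination is the
		// object storage URL itself rather than a ClickHouse backup disk; per the ChangeLog entry in
		// this series, the --rbac/--rbac-only/--configs/--configs-only flags are expected to work in
		// that mode as well.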
testRBACScenario("/etc/clickhouse-backup/config-s3-embedded-url.yml") testRBACScenario("/etc/clickhouse-backup/config-azblob-embedded.yml") } - if chVersion == "head" || compareVersion(chVersion, "24.2") >= 0 { + if compareVersion(chVersion, "24.2") >= 0 { testRBACScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") } } @@ -612,12 +612,12 @@ func TestConfigs(t *testing.T) { } testConfigsScenario("/etc/clickhouse-backup/config-s3.yml") chVersion := os.Getenv("CLICKHOUSE_VERSION") - if chVersion == "head" || compareVersion(chVersion, "24.1") >= 0 { + if compareVersion(chVersion, "24.1") >= 0 { testConfigsScenario("/etc/clickhouse-backup/config-s3-embedded.yml") testConfigsScenario("/etc/clickhouse-backup/config-s3-embedded-url.yml") testConfigsScenario("/etc/clickhouse-backup/config-azblob-embedded.yml") } - if chVersion == "head" || compareVersion(chVersion, "24.2") >= 0 { + if compareVersion(chVersion, "24.2") >= 0 { testConfigsScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") } } @@ -1814,22 +1814,30 @@ func TestIntegrationEmbedded(t *testing.T) { //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 version := os.Getenv("CLICKHOUSE_VERSION") - if version != "head" && compareVersion(version, "23.3") < 0 { + if compareVersion(version, "23.3") < 0 { t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) } //t.Parallel() r := require.New(t) + //@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053 + //CUSTOM backup create folder in each disk + //r.NoError(dockerExec("azure", "apk", "add", "tcpdump")) + //r.NoError(dockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) + //if compareVersion(version, "24.2") >= 0 { + // runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + //} + //runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //r.NoError(dockerExec("azure", "pkill", "tcpdump")) + //r.NoError(dockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + + if compareVersion(version, "23.8") >= 0 { + runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + } //CUSTOM backup create folder in each disk r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") - //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088, https://github.com/Azure/Azurite/issues/2053 - if version == "head" || compareVersion(version, "24.2") >= 0 { - r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - } 
//@TODO think about how to implements embedded backup for s3_plain disks //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") @@ -2752,6 +2760,18 @@ func (ch *TestClickHouse) queryWithNoError(r *require.Assertions, query string, var dockerExecTimeout = 180 * time.Second +func dockerExecBackground(container string, cmd ...string) error { + out, err := dockerExecBackgroundOut(container, cmd...) + log.Info(out) + return err +} + +func dockerExecBackgroundOut(container string, cmd ...string) (string, error) { + dcmd := []string{"exec", "-d", container} + dcmd = append(dcmd, cmd...) + return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...) +} + func dockerExec(container string, cmd ...string) error { out, err := dockerExecOut(container, cmd...) log.Info(out) From d2f3c846c13632ff0d8289c70eac24a760a79997 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 27 Feb 2024 22:38:38 +0500 Subject: [PATCH 06/80] BACKUP/RESTORE FROM/TO S3() pass test, but BACKUP/RESTORE FROM/TO AzureBlobStorage(), still not work, clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447 --- test/testflows/clickhouse_backup/configs/backup/config.yml | 2 +- .../clickhouse_backup/tests/snapshots/cli.py.cli.snapshot | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/testflows/clickhouse_backup/configs/backup/config.yml b/test/testflows/clickhouse_backup/configs/backup/config.yml index 519607da..f27bb1ef 100644 --- a/test/testflows/clickhouse_backup/configs/backup/config.yml +++ b/test/testflows/clickhouse_backup/configs/backup/config.yml @@ -76,13 +76,13 @@ general: upload_concurrency: 2 s3: access_key: access_key - secret_key: it_is_my_super_secret_key acl: private bucket: altinity-qa-test debug: false disable_ssl: true endpoint: http://minio:9000 force_path_style: true + secret_key: it_is_my_super_secret_key sftp: address: sftp_server compression_format: tar diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index af8954d5..08ae0c79 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' 
freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" +default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: 
false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' 
address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'""" From a947258fa8b68d82b806d93f59f1a61298d45363 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 28 Feb 2024 13:46:36 +0500 Subject: [PATCH 07/80] fix CI/CD for FTP, fix Dockerfile --- Dockerfile | 2 +- pkg/storage/ftp.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 64987361..1a37513c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \ find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} + && \ ( apt-get update || true ) && \ apt-get install -y --no-install-recommends gnupg ca-certificates wget && update-ca-certificates && \ - for srv in "keyserver.ubuntu.com" "pool.sks-keyservers.net" "keys.gnupg.net"; do apt-key adv --keyserver $srv --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E; if [[ $? -eq 0 ]]; then break; fi; done && \ + for srv in "keyserver.ubuntu.com" "pool.sks-keyservers.net" "keys.gnupg.net"; do apt-key adv --keyserver $srv --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E; if [ $? 
-eq 0 ]; then break; fi; done && \ DISTRIB_CODENAME=$(cat /etc/lsb-release | grep DISTRIB_CODENAME | cut -d "=" -f 2) && \ echo ${DISTRIB_CODENAME} && \ echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \ diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go index 0f720fd1..c4643cb9 100644 --- a/pkg/storage/ftp.go +++ b/pkg/storage/ftp.go @@ -48,7 +48,7 @@ func (f *FTP) Connect(ctx context.Context) error { } f.clients = pool.NewObjectPoolWithDefaultConfig(ctx, &ftpPoolFactory{options: options, ftp: f}) if f.Config.Concurrency > 1 { - f.clients.Config.MaxTotal = int(f.Config.Concurrency) * 3 + f.clients.Config.MaxTotal = int(f.Config.Concurrency) * 4 } f.dirCacheMutex.Lock() From 25357bb19964a5a710ecc89490506463aad32f25 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 28 Feb 2024 19:24:35 +0500 Subject: [PATCH 08/80] final polishing BACKUP/RESTORE for S3/AzureBlobStorage, fix https://github.com/Altinity/clickhouse-backup/issues/695, wait when XML-API will implement on https://github.com/fsouza/fake-gcs-server/pull/1164, https://github.com/fsouza/fake-gcs-server/issues/1330 --- pkg/backup/restore.go | 1 - test/integration/config-gcs-embedded-url.yml | 20 ++++++++++++++++++++ test/integration/config-gcs.yml | 1 - test/integration/docker-compose_advanced.yml | 5 +++++ test/integration/integration_test.go | 4 ++++ 5 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 test/integration/config-gcs-embedded-url.yml diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index f8106ba5..e0a43c5f 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -529,7 +529,6 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, if err != nil { return err } - /*}*/ return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) } diff --git a/test/integration/config-gcs-embedded-url.yml b/test/integration/config-gcs-embedded-url.yml new file mode 100644 index 00000000..6921c1da --- /dev/null +++ b/test/integration/config-gcs-embedded-url.yml @@ -0,0 +1,20 @@ +general: + disable_progress_bar: true + remote_storage: gcs + upload_concurrency: 4 + download_concurrency: 4 + restore_schema_on_cluster: "{cluster}" +clickhouse: + host: clickhouse + port: 9000 + restart_command: bash -c 'echo "FAKE RESTART"' + use_embedded_backup_restore: true + timeout: 4h +gcs: + bucket: "${QA_GCS_OVER_S3_BUCKET}" + path: backup/{cluster}/{shard} + object_disk_path: object_disks/{cluster}/{shard} + credentials_file: /etc/clickhouse-backup/credentials.json + embedded_access_key: "${QA_GCS_OVER_S3_ACCESS_KEY}" + embedded_secret_key: "${QA_GCS_OVER_S3_SECRET_KEY}" + compression_format: none diff --git a/test/integration/config-gcs.yml b/test/integration/config-gcs.yml index a33b5bc6..f7101c66 100644 --- a/test/integration/config-gcs.yml +++ b/test/integration/config-gcs.yml @@ -14,4 +14,3 @@ gcs: object_disk_path: object_disks/{cluster}/{shard} credentials_file: /etc/clickhouse-backup/credentials.json compression_format: tar -# endpoint: http://gcs:8080/storage/v1/ diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index a34ed174..9cc6c4fc 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -184,6 +184,10 @@ services: QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY} QA_AWS_BUCKET: ${QA_AWS_BUCKET} QA_AWS_REGION: ${QA_AWS_REGION} +# GCS over S3 embedded backups + 
QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" + QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" + QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" # https://github.com/Altinity/clickhouse-backup/issues/691: AWS_ACCESS_KEY_ID: access_key AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key @@ -252,6 +256,7 @@ services: - ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml - ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml - ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml + - ./config-gcs-embedded-url.yml:/etc/clickhouse-backup/config-gcs-embedded-url.yml.template - ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c83a4870..ac2e05ee 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1832,6 +1832,10 @@ func TestIntegrationEmbedded(t *testing.T) { //r.NoError(dockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) if compareVersion(version, "23.8") >= 0 { + //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330 + //installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") + //r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml")) + //runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } //CUSTOM backup create folder in each disk From 901f40836e2d971ef2ac844580fc3e76e48098d5 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 28 Feb 2024 19:35:13 +0500 Subject: [PATCH 09/80] added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix https://github.com/Altinity/clickhouse-backup/issues/695 --- ChangeLog.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ChangeLog.md b/ChangeLog.md index 34d72260..353ee6f4 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,3 +1,7 @@ +# v2.5.0 (not released yet) +IMPROVEMENTS +- added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) + # v2.4.33 BUG FIXES - fixed wrong anonymous authorization for service account for GCS, added `GCS_SKIP_CREDENTIALS` fix [848](https://github.com/Altinity/clickhouse-backup/issues/848), fix [847](https://github.com/Altinity/clickhouse-backup/pull/847), thanks @sanadhis From 5ce6d6dde083da970e4d19faa3cfad6f02e402f7 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 29 Feb 2024 16:48:12 +0500 Subject: [PATCH 10/80] execute `ALTER TABLE ... 
DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix https://github.com/Altinity/clickhouse-backup/issues/756 --- ChangeLog.md | 7 + pkg/backup/restore.go | 192 ++++++++++++++++----------- pkg/config/config.go | 2 +- pkg/storage/s3.go | 7 +- test/integration/integration_test.go | 37 ++++-- 5 files changed, 151 insertions(+), 94 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 353ee6f4..6e026cda 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,6 +1,13 @@ # v2.5.0 (not released yet) IMPROVEMENTS - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) +- `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` + +BUG FIXES +- continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 +- changed minimal part size for multipart upload in CopyObject from `5Mb` to `10Mb` +- restore SQL UDF functions after restore tables +- execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) # v2.4.33 BUG FIXES diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index e0a43c5f..db6da2b7 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -96,47 +96,40 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par break } } - if err == nil { - backupMetadata := metadata.BackupMetadata{} - if err := json.Unmarshal(backupMetadataBody, &backupMetadata); err != nil { - return err + if os.IsNotExist(err) { // Legacy backups don't have metadata.json, but we need handle not exists local backup + backupPath := path.Join(b.DefaultDataPath, "backup", backupName) + if fInfo, fErr := os.Stat(backupPath); fErr != nil || !fInfo.IsDir() { + return fmt.Errorf("'%s' stat return %v, %v", backupPath, fInfo, fErr) } - b.isEmbedded = strings.Contains(backupMetadata.Tags, "embedded") + } else if err != nil { + return err + } + backupMetadata := metadata.BackupMetadata{} + if err := json.Unmarshal(backupMetadataBody, &backupMetadata); err != nil { + return err + } + b.isEmbedded = strings.Contains(backupMetadata.Tags, "embedded") - if schemaOnly || doRestoreData { - for _, database := range backupMetadata.Databases { - targetDB := database.Name - if !IsInformationSchema(targetDB) { - if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropTable, schemaOnly, ignoreDependencies); err != nil { - return err - } - } - } - } - // do not create UDF when use --data, --rbac-only, --configs-only flags, https://github.com/Altinity/clickhouse-backup/issues/697 - if schemaOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - for _, function := range backupMetadata.Functions { - if err = b.ch.CreateUserDefinedFunction(function.Name, function.CreateQuery, b.cfg.General.RestoreSchemaOnCluster); err != nil { + if schemaOnly || doRestoreData { + for _, database := range backupMetadata.Databases { + targetDB := database.Name + if !IsInformationSchema(targetDB) { + if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropTable, schemaOnly, ignoreDependencies); err != nil { return err } } } - if len(backupMetadata.Tables) == 0 { - // corner cases for 
https://github.com/Altinity/clickhouse-backup/issues/832 - if !restoreRBAC && !rbacOnly && !restoreConfigs && !configsOnly { - if !b.cfg.General.AllowEmptyBackups { - err = fmt.Errorf("'%s' doesn't contains tables for restore, if you need it, you can setup `allow_empty_backups: true` in `general` config section", backupName) - log.Errorf("%v", err) - return err - } - log.Warnf("'%s' doesn't contains tables for restore", backupName) - return nil + } + if len(backupMetadata.Tables) == 0 { + // corner cases for https://github.com/Altinity/clickhouse-backup/issues/832 + if !restoreRBAC && !rbacOnly && !restoreConfigs && !configsOnly { + if !b.cfg.General.AllowEmptyBackups { + err = fmt.Errorf("'%s' doesn't contains tables for restore, if you need it, you can setup `allow_empty_backups: true` in `general` config section", backupName) + log.Errorf("%v", err) + return err } - } - } else if os.IsNotExist(err) { // Legacy backups don't have metadata.json, but we need handle not exists local backup - backupPath := path.Join(b.DefaultDataPath, "backup", backupName) - if fInfo, fErr := os.Stat(backupPath); fErr != nil || !fInfo.IsDir() { - return fmt.Errorf("'%s' stat return %v, %v", backupPath, fInfo, fErr) + log.Warnf("'%s' doesn't contains tables for restore", backupName) + return nil } } needRestart := false @@ -177,20 +170,85 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } }() } - if schemaOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err := b.RestoreSchema(ctx, backupName, tablePattern, dropTable, ignoreDependencies); err != nil { + var tablesForRestore ListOfTables + var partitionsNames map[metadata.TableTitle][]string + if tablePattern == "" { + tablePattern = "*" + } + metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") + } + + if !rbacOnly && !configsOnly { + tablesForRestore, partitionsNames, err = b.getTablesForRestoreLocal(ctx, backupName, metadataPath, tablePattern, dropTable, partitions) + if err != nil { + return err + } + } + if schemaOnly || dropTable || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { + if err = b.RestoreSchema(ctx, backupName, tablesForRestore, ignoreDependencies); err != nil { return err } } + // https://github.com/Altinity/clickhouse-backup/issues/756 + if dataOnly && !schemaOnly && !rbacOnly && !configsOnly && len(partitions) > 0 { + if err = b.dropExistPartitions(ctx, tablesForRestore, partitionsNames, partitions); err != nil { + return err + } + + } if dataOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err := b.RestoreData(ctx, backupName, tablePattern, partitions, disks); err != nil { + if err := b.RestoreData(ctx, backupName, metadataPath, tablePattern, partitions, disks); err != nil { return err } } + // do not create UDF when use --data, --rbac-only, --configs-only flags, https://github.com/Altinity/clickhouse-backup/issues/697 + if schemaOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { + for _, function := range backupMetadata.Functions { + if err = b.ch.CreateUserDefinedFunction(function.Name, function.CreateQuery, b.cfg.General.RestoreSchemaOnCluster); err != nil { + return err + } + } + } log.Info("done") return nil } +func (b *Backuper) getTablesForRestoreLocal(ctx context.Context, backupName string, metadataPath string, tablePattern string, dropTable bool, partitions []string) 
(ListOfTables, map[metadata.TableTitle][]string, error) { + var tablesForRestore ListOfTables + var partitionsNames map[metadata.TableTitle][]string + info, err := os.Stat(metadataPath) + // corner cases for https://github.com/Altinity/clickhouse-backup/issues/832 + if err != nil { + if !b.cfg.General.AllowEmptyBackups { + return nil, nil, err + } + if !os.IsNotExist(err) { + return nil, nil, err + } + return nil, nil, nil + } + if !info.IsDir() { + return nil, nil, fmt.Errorf("%s is not a dir", metadataPath) + } + tablesForRestore, partitionsNames, err = b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, dropTable, partitions) + if err != nil { + return nil, nil, err + } + // if restore-database-mapping specified, create database in mapping rules instead of in backup files. + if len(b.cfg.General.RestoreDatabaseMapping) > 0 { + err = changeTableQueryToAdjustDatabaseMapping(&tablesForRestore, b.cfg.General.RestoreDatabaseMapping) + if err != nil { + return nil, nil, err + } + } + if len(tablesForRestore) == 0 { + return nil, nil, fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) + } + return tablesForRestore, partitionsNames, nil +} + func (b *Backuper) restartClickHouse(ctx context.Context, backupName string, log *apexLog.Entry) error { log.Warnf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) for _, cmd := range strings.Split(b.ch.Config.RestartCommand, ";") { @@ -450,8 +508,24 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat return nil } +// execute ALTER TABLE db.table DROP PARTITION for corner case when we try to restore backup with the same structure, https://github.com/Altinity/clickhouse-backup/issues/756 +func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore ListOfTables, partitionsIdMap map[metadata.TableTitle][]string, partitions []string) error { + for _, table := range tablesForRestore { + partitionsIds, isExists := partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Table}] + if !isExists { + return fmt.Errorf("`%s`.`%s` doesn't contains %#v partitions", table.Database, table.Table, partitions) + } + partitionsSQL := fmt.Sprintf("DROP PARTITION %s", strings.Join(partitionsIds, ", DROP PARTITION ")) + err := b.ch.QueryContext(ctx, fmt.Sprintf("ALTER TABLE `%s`.`%s` %s SETTINGS mutations_sync=2", table.Database, table.Table, partitionsSQL)) + if err != nil { + return err + } + } + return nil +} + // RestoreSchema - restore schemas matched by tablePattern from backupName -func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern string, dropTable, ignoreDependencies bool) error { +func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesForRestore ListOfTables, ignoreDependencies bool) error { log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, "operation": "restore_schema", @@ -461,41 +535,6 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s if err != nil { return err } - metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") - if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") - } - info, err := os.Stat(metadataPath) - // corner cases for https://github.com/Altinity/clickhouse-backup/issues/832 - if err != nil { - if !b.cfg.General.AllowEmptyBackups { - return err - } - if !os.IsNotExist(err) { - return err 
- } - return nil - } - if !info.IsDir() { - return fmt.Errorf("%s is not a dir", metadataPath) - } - if tablePattern == "" { - tablePattern = "*" - } - tablesForRestore, _, err := b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, dropTable, nil) - if err != nil { - return err - } - // if restore-database-mapping specified, create database in mapping rules instead of in backup files. - if len(b.cfg.General.RestoreDatabaseMapping) > 0 { - err = changeTableQueryToAdjustDatabaseMapping(&tablesForRestore, b.cfg.General.RestoreDatabaseMapping) - if err != nil { - return err - } - } - if len(tablesForRestore) == 0 { - return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) - } if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { return dropErr } @@ -743,6 +782,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci }, query, b.cfg.General.RestoreSchemaOnCluster, ignoreDependencies, version, b.DefaultDataPath) if dropErr == nil { tablesForDrop[i].Query = query + break } } } else { @@ -776,7 +816,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci } // RestoreData - restore data for tables matched by tablePattern from backupName -func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePattern string, partitions []string, disks []clickhouse.Disk) error { +func (b *Backuper) RestoreData(ctx context.Context, backupName string, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { startRestore := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, @@ -803,10 +843,6 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt } var tablesForRestore ListOfTables var partitionsNameList map[metadata.TableTitle][]string - metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata") - if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - metadataPath = path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") - } if backup.Legacy { tablesForRestore, err = b.ch.GetBackupTablesLegacy(backupName, disks) } else { diff --git a/pkg/config/config.go b/pkg/config/config.go index e52d4ff2..5f0b2e15 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -565,7 +565,7 @@ func DefaultConfig() *Config { StorageClass: string(s3types.StorageClassStandard), Concurrency: int(downloadConcurrency + 1), PartSize: 0, - MaxPartsCount: 2000, + MaxPartsCount: 4000, }, GCS: GCSConfig{ CompressionLevel: 1, diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 62effbf8..fc9a91cb 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -501,8 +501,11 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d if srcSize%s.Config.MaxPartsCount > 0 { partSize++ } - if partSize < 5*1024*1024 { - partSize = 5 * 1024 * 1024 + if partSize < 10*1024*1024 { + partSize = 10 * 1024 * 1024 + } + if partSize > 5*1024*1024*1024 { + partSize = 5 * 1024 * 1024 * 1024 } // Calculate the number of parts diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index ac2e05ee..199b9659 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2220,7 +2220,7 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t2(dt, v) SELECT '%s', number FROM numbers(10)", dt)) } - // check 
create_remote full > download + partitions > delete local > download > restore --partitions > restore + // check create_remote full > download + partitions > restore --data --partitions > delete local > download > restore --partitions > restore r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) @@ -2246,6 +2246,25 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test expectedLines = "2" } r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) + checkRestoredDataWithPartitions := func() { + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) + expectedCount = 40 + r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) + r.Equal(uint64(80), result, "expect count=80") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + } + + out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) + t.Log(out) + r.NoError(err) + r.Contains(out, "DROP PARTITION") + checkRestoredDataWithPartitions() + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) @@ -2263,18 +2282,10 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) - expectedCount = 40 - r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM 
"+dbName+".t2)")) - r.Equal(uint64(80), result, "expect count=80") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - + out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) + r.NoError(err) + r.NotContains(out, "DROP PARTITION") + checkRestoredDataWithPartitions() // check create + partitions r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) expectedLines = "5" From 751c37e4f62d47b7c50700f1f320a46a27dd68c6 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 29 Feb 2024 18:02:01 +0500 Subject: [PATCH 11/80] execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix https://github.com/Altinity/clickhouse-backup/issues/756 --- test/integration/integration_test.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 199b9659..b911b874 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2246,24 +2246,18 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test expectedLines = "2" } r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - checkRestoredDataWithPartitions := func() { + checkRestoredDataWithPartitions := func(expectedCount uint64) { result = 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) - expectedCount = 40 - r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) - r.Equal(uint64(80), result, "expect count=80") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.Equal(expectedCount, result, "expect count=%d", expectedCount) } out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) t.Log(out) r.NoError(err) r.Contains(out, "DROP PARTITION") - checkRestoredDataWithPartitions() + // we just replace data in exists table + checkRestoredDataWithPartitions(80) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) @@ 
-2285,7 +2279,14 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) r.NoError(err) r.NotContains(out, "DROP PARTITION") - checkRestoredDataWithPartitions() + checkRestoredDataWithPartitions(40) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) + checkRestoredDataWithPartitions(80) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + // check create + partitions r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) expectedLines = "5" From 7ddf8a20aa3011a515957094e8c5f8d215a2b5e5 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 29 Feb 2024 19:17:12 +0500 Subject: [PATCH 12/80] execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix https://github.com/Altinity/clickhouse-backup/issues/756 --- pkg/backup/restore.go | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index db6da2b7..0df981b5 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -69,6 +69,10 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err != nil { return err } + version, err := b.ch.GetVersion(ctx) + if err != nil { + return err + } b.DefaultDataPath, err = b.ch.GetDefaultPath(disks) if err != nil { log.Warnf("%v", err) @@ -114,7 +118,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par for _, database := range backupMetadata.Databases { targetDB := database.Name if !IsInformationSchema(targetDB) { - if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropTable, schemaOnly, ignoreDependencies); err != nil { + if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropTable, schemaOnly, ignoreDependencies, version); err != nil { return err } } @@ -187,13 +191,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } if schemaOnly || dropTable || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err = b.RestoreSchema(ctx, backupName, tablesForRestore, ignoreDependencies); err != nil { + if err = b.RestoreSchema(ctx, backupName, tablesForRestore, ignoreDependencies, version); err != nil { return err } } // https://github.com/Altinity/clickhouse-backup/issues/756 if dataOnly && !schemaOnly && !rbacOnly && !configsOnly && len(partitions) > 0 { - if err = b.dropExistPartitions(ctx, tablesForRestore, partitionsNames, partitions); err != nil { + if err = b.dropExistPartitions(ctx, tablesForRestore, partitionsNames, partitions, version); err != nil { return err } @@ -307,7 +311,7 @@ func (b *Backuper) executeShellCommandWithTimeout(ctx context.Context, cmd strin return nil } -func (b *Backuper) restoreEmptyDatabase(ctx context.Context, targetDB, tablePattern string, 
database metadata.DatabasesMeta, dropTable, schemaOnly, ignoreDependencies bool) error { +func (b *Backuper) restoreEmptyDatabase(ctx context.Context, targetDB, tablePattern string, database metadata.DatabasesMeta, dropTable, schemaOnly, ignoreDependencies bool, version int) error { isMapped := false if targetDB, isMapped = b.cfg.General.RestoreDatabaseMapping[database.Name]; !isMapped { targetDB = database.Name @@ -326,10 +330,6 @@ func (b *Backuper) restoreEmptyDatabase(ctx context.Context, targetDB, tablePatt // https://github.com/Altinity/clickhouse-backup/issues/651 settings := "" if ignoreDependencies { - version, err := b.ch.GetVersion(ctx) - if err != nil { - return err - } if version >= 21012000 { settings = "SETTINGS check_table_dependencies=0" } @@ -509,14 +509,18 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat } // execute ALTER TABLE db.table DROP PARTITION for corner case when we try to restore backup with the same structure, https://github.com/Altinity/clickhouse-backup/issues/756 -func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore ListOfTables, partitionsIdMap map[metadata.TableTitle][]string, partitions []string) error { +func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore ListOfTables, partitionsIdMap map[metadata.TableTitle][]string, partitions []string, version int) error { for _, table := range tablesForRestore { partitionsIds, isExists := partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Table}] if !isExists { return fmt.Errorf("`%s`.`%s` doesn't contains %#v partitions", table.Database, table.Table, partitions) } partitionsSQL := fmt.Sprintf("DROP PARTITION %s", strings.Join(partitionsIds, ", DROP PARTITION ")) - err := b.ch.QueryContext(ctx, fmt.Sprintf("ALTER TABLE `%s`.`%s` %s SETTINGS mutations_sync=2", table.Database, table.Table, partitionsSQL)) + settings := "" + if version >= 19017000 { + settings = "SETTINGS mutations_sync=2" + } + err := b.ch.QueryContext(ctx, fmt.Sprintf("ALTER TABLE `%s`.`%s` %s %s", table.Database, table.Table, partitionsSQL, settings)) if err != nil { return err } @@ -525,22 +529,18 @@ func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore Lis } // RestoreSchema - restore schemas matched by tablePattern from backupName -func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesForRestore ListOfTables, ignoreDependencies bool) error { +func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesForRestore ListOfTables, ignoreDependencies bool, version int) error { log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, "operation": "restore_schema", }) - version, err := b.ch.GetVersion(ctx) - if err != nil { - return err - } if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { return dropErr } var restoreErr error if b.isEmbedded { - restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore, log) + restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore, version) } else { restoreErr = b.restoreSchemaRegular(tablesForRestore, version, log) } @@ -554,16 +554,12 @@ var UUIDWithMergeTreeRE = regexp.MustCompile(`^(.+)(UUID)(\s+)'([^']+)'(.+)({uui var emptyReplicatedMergeTreeRE = regexp.MustCompile(`(?m)Replicated(MergeTree|ReplacingMergeTree|SummingMergeTree|AggregatingMergeTree|CollapsingMergeTree|VersionedCollapsingMergeTree|GraphiteMergeTree)\s*\(([^']*)\)(.*)`) -func (b *Backuper) 
restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, log *apexLog.Entry) error { +func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, version int) error { var err error - chVersion, err := b.ch.GetVersion(ctx) - if err != nil { - return err - } if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - err = b.fixEmbeddedMetadataLocal(ctx, backupName, chVersion) + err = b.fixEmbeddedMetadataLocal(ctx, backupName, version) } else { - err = b.fixEmbeddedMetadataRemote(ctx, backupName, chVersion) + err = b.fixEmbeddedMetadataRemote(ctx, backupName, version) } if err != nil { return err From b37ca55d626f7d8e0f9b0fba38aebcb7be609868 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 1 Mar 2024 11:08:57 +0500 Subject: [PATCH 13/80] fix wrong behavior for `freeze_by_part` + `freeze_by_part_where`, fix https://github.com/Altinity/clickhouse-backup/issues/855 --- ChangeLog.md | 1 + pkg/clickhouse/clickhouse.go | 8 ++++---- pkg/config/config.go | 10 ++++++++++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/ChangeLog.md index 6e026cda..893f76e4 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -8,6 +8,7 @@ BUG FIXES - changed minimal part size for multipart upload in CopyObject from `5Mb` to `10Mb` - restore SQL UDF functions after restore tables - execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) +- fix wrong behavior for `freeze_by_part` + `freeze_by_part_where`, fix [855](https://github.com/Altinity/clickhouse-backup/issues/855) # v2.4.33 BUG FIXES diff --git a/pkg/clickhouse/clickhouse.go index 83d20c7a..5ac4b32f 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -649,9 +649,9 @@ func (ch *ClickHouse) GetVersionDescribe(ctx context.Context) string { return result } -// FreezeTableOldWay - freeze all partitions in table one by one -// This way using for ClickHouse below v19.1 -func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name string) error { +// FreezeTableByParts - freeze all partitions in table one by one +// also apply `freeze_by_part_where` +func (ch *ClickHouse) FreezeTableByParts(ctx context.Context, table *Table, name string) error { var partitions []struct { PartitionID string `ch:"partition_id"` } @@ -707,7 +707,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string } } if version < 19001005 || ch.Config.FreezeByPart { - return ch.FreezeTableOldWay(ctx, table, name) + return ch.FreezeTableByParts(ctx, table, name) } withNameQuery := "" if name != "" { diff --git a/pkg/config/config.go index 5f0b2e15..e72fc2b0 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "os" + "regexp" "runtime" "strings" "time" @@ -301,6 +302,9 @@ func (cfg *Config) GetCompressionFormat() string { } } +var freezeByPartBeginAndRE = regexp.MustCompile(`(?im)^\s*AND\s+`) + + // LoadConfig - load config from file + environment variables func LoadConfig(configLocation string) (*Config, error) { cfg := DefaultConfig() @@ -329,6 +333,12 @@ func LoadConfig(configLocation string) (*Config, error) { cfg.AzureBlob.Path = strings.TrimPrefix(cfg.AzureBlob.Path, "/") cfg.S3.Path = strings.TrimPrefix(cfg.S3.Path, "/") cfg.GCS.Path = strings.TrimPrefix(cfg.GCS.Path, "/")
+ + // https://github.com/Altinity/clickhouse-backup/issues/855 + if cfg.ClickHouse.FreezeByPart && cfg.ClickHouse.FreezeByPartWhere != "" && !freezeByPartBeginAndRE.MatchString(cfg.ClickHouse.FreezeByPartWhere) { + cfg.ClickHouse.FreezeByPartWhere = " AND " + cfg.ClickHouse.FreezeByPartWhere + } + log.SetLevelFromString(cfg.General.LogLevel) if err = ValidateConfig(cfg); err != nil { From 1535dabb3b450848d18d9af085244c5b752b3867 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 1 Mar 2024 13:50:31 +0500 Subject: [PATCH 14/80] don't allow ATTACH TABLE for already exists data --- pkg/filesystemhelper/filesystemhelper.go | 12 ++++++++++++ test/integration/integration_test.go | 11 +++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 75ffcd98..48a1aa8e 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -136,6 +136,18 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM backupDiskPath := diskMap[backupDiskName] if toDetached { dstParentDir = filepath.Join(dstParentDir, "detached") + + } else { + // avoid to restore to non-empty to avoid attach in already dropped partitions, corner case + existsFiles, err := os.ReadDir(dstParentDir) + if err != nil && !os.IsNotExist(err) { + return err + } + for _, f := range existsFiles { + if f.Name() != "detached" && !strings.HasSuffix(f.Name(), ".txt") && !strings.HasPrefix(f.Name(), "tmp") { + return fmt.Errorf("%s contains exists data, we can't restore directly via ATTACH TABLE, use `clickhouse->restore_as_attach=false` in your config", dstParentDir) + } + } } dstPartPath := filepath.Join(dstParentDir, part.Name) info, err := os.Stat(dstPartPath) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index b911b874..f425e536 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2251,8 +2251,14 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) r.Equal(expectedCount, result, "expect count=%d", expectedCount) } - - out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) + if remoteStorageType == "FTP" { + // during DROP PARTITION, we create empty covered part, and cant restore via ATTACH TABLE properly, https://github.com/Altinity/clickhouse-backup/issues/756 + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + r.Error(err) + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "CLICKHOUSE_RESTORE_AS_ATTACH=0 clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + } else { + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + } t.Log(out) r.NoError(err) r.Contains(out, "DROP PARTITION") @@ -2276,6 +2282,7 @@ func testBackupSpecifiedPartitions(t 
*testing.T, r *require.Assertions, ch *Test out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) + out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) r.NoError(err) r.NotContains(out, "DROP PARTITION") From 8d1ad917432b2fee570772a5e0e7342737286f3a Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 2 Mar 2024 20:28:10 +0400 Subject: [PATCH 15/80] avoid trying to restore table via ATTACH TABLE for `--data --partitions=...` corner case for https://github.com/Altinity/clickhouse-backup/issues/756 --- pkg/filesystemhelper/filesystemhelper.go | 28 ++++++++++++++---------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/pkg/filesystemhelper/filesystemhelper.go index 48a1aa8e..9081b4a1 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -120,6 +120,23 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM start := time.Now() dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) dbAndTableDir := path.Join(common.TablePathEncode(backupTable.Database), common.TablePathEncode(backupTable.Table)) + if !toDetached { + for backupDiskName := range backupTable.Parts { + dstParentDir, dstParentDirExists := dstDataPaths[backupDiskName] + if dstParentDirExists { + // avoid to restore to non-empty to avoid attach in already dropped partitions, corner case + existsFiles, err := os.ReadDir(dstParentDir) + if err != nil && !os.IsNotExist(err) { + return err + } + for _, f := range existsFiles { + if f.Name() != "detached" && !strings.HasSuffix(f.Name(), ".txt") { + return fmt.Errorf("%s contains exists data %v, we can't restore directly via ATTACH TABLE, use `clickhouse->restore_as_attach=false` in your config", dstParentDir, existsFiles) + } + } + } + } + } for backupDiskName := range backupTable.Parts { for _, part := range backupTable.Parts[backupDiskName] { dstParentDir, dstParentDirExists := dstDataPaths[backupDiskName] @@ -137,17 +154,6 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM if toDetached { dstParentDir = filepath.Join(dstParentDir, "detached") - } else { - // avoid to restore to non-empty to avoid attach in already dropped partitions, corner case - existsFiles, err := os.ReadDir(dstParentDir) - if err != nil && !os.IsNotExist(err) { - return err - } - for _, f := range existsFiles { - if f.Name() != "detached" && !strings.HasSuffix(f.Name(), ".txt") && !strings.HasPrefix(f.Name(), "tmp") { - return fmt.Errorf("%s contains exists data, we can't restore directly via ATTACH TABLE, use `clickhouse->restore_as_attach=false` in your config", dstParentDir) - } - } } dstPartPath := filepath.Join(dstParentDir, part.Name) info, err := os.Stat(dstPartPath) From f9ee98c7aaff1bb589600792392fd37146482b04 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 4 Mar 2024 22:01:14 +0300 Subject: [PATCH 16/80] clickhouse-keeper 24.2 doesn't work with docker by default --- test/integration/docker-compose_advanced.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/integration/docker-compose_advanced.yml index 9cc6c4fc..48be8ba7 100644 --- a/test/integration/docker-compose_advanced.yml +++ 
b/test/integration/docker-compose_advanced.yml @@ -142,7 +142,9 @@ services: hostname: zookeeper volumes: - ./clickhouse-keeper.xml:/etc/clickhouse-keeper/conf.d/clickhouse-keeper.xml - - /var/lib/clickhouse + environment: + - CLICKHOUSE_UID=0 + - CLICKHOUSE_GID=0 networks: - clickhouse-backup healthcheck: From 9251e13c3e1c246fa94e3f86b047dc57ea8b712a Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 4 Mar 2024 22:01:42 +0300 Subject: [PATCH 17/80] clickhouse-keeper 24.2 doesn't work with docker by default --- test/integration/docker-compose_advanced.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 48be8ba7..0f94e340 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -142,6 +142,8 @@ services: hostname: zookeeper volumes: - ./clickhouse-keeper.xml:/etc/clickhouse-keeper/conf.d/clickhouse-keeper.xml + - /var/lib/clickhouse + - /var/lib/clickhouse-keeper environment: - CLICKHOUSE_UID=0 - CLICKHOUSE_GID=0 From aefaebf72980e6c2e60adf2e8edc015f649c6b61 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 4 Mar 2024 22:41:47 +0300 Subject: [PATCH 18/80] old clickhouse-server versions doesn't support restore_as_attach --- test/integration/integration_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index f425e536..865025a1 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2251,7 +2251,8 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) r.Equal(expectedCount, result, "expect count=%d", expectedCount) } - if remoteStorageType == "FTP" { + + if remoteStorageType == "FTP" && !strings.Contains(backupConfig, "old") { // during DROP PARTITION, we create empty covered part, and cant restore via ATTACH TABLE properly, https://github.com/Altinity/clickhouse-backup/issues/756 out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) r.Error(err) From 33d8ee9a80863369087428e8028cfe1258daed87 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 5 Mar 2024 23:02:15 +0300 Subject: [PATCH 19/80] `--data` for `restore` with `use_embedded_backup_restore: true` will use `allow_non_empty_tables=true` to allow fix https://github.com/Altinity/clickhouse-backup/issues/756 --- ChangeLog.md | 3 ++- pkg/backup/restore.go | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 893f76e4..62d863ee 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,7 +2,8 @@ IMPROVEMENTS - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) - `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` - +-- `--data` for `restore` with `use_embedded_backup_restore: true` will use `allow_non_empty_tables=true` to allow fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) + BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from 
`2000` to `4000` to continue decrease memory usage for S3 - changed minimal part size for multipart upload in CopyObject from `5Mb` to `10Mb` diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 0df981b5..1e1a8ca5 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -203,7 +203,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if dataOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err := b.RestoreData(ctx, backupName, metadataPath, tablePattern, partitions, disks); err != nil { + if err := b.RestoreData(ctx, backupName, dataOnly, metadataPath, tablePattern, partitions, disks); err != nil { return err } } @@ -564,7 +564,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, if err != nil { return err } - return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) + return b.restoreEmbedded(ctx, backupName, true, false, tablesForRestore, nil) } func (b *Backuper) fixEmbeddedMetadataRemote(ctx context.Context, backupName string, chVersion int) error { @@ -812,7 +812,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci } // RestoreData - restore data for tables matched by tablePattern from backupName -func (b *Backuper) RestoreData(ctx context.Context, backupName string, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { +func (b *Backuper) RestoreData(ctx context.Context, backupName string, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { startRestore := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, @@ -857,7 +857,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, metadataP } log.Debugf("found %d tables with data in backup", len(tablesForRestore)) if b.isEmbedded { - err = b.restoreDataEmbedded(ctx, backupName, tablesForRestore, partitionsNameList) + err = b.restoreDataEmbedded(ctx, backupName, dataOnly, tablesForRestore, partitionsNameList) } else { err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, diskTypes, disks, log) } @@ -868,8 +868,8 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, metadataP return nil } -func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, partitionsNameList map[metadata.TableTitle][]string) error { - return b.restoreEmbedded(ctx, backupName, false, tablesForRestore, partitionsNameList) +func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, dataOnly bool, tablesForRestore ListOfTables, partitionsNameList map[metadata.TableTitle][]string) error { + return b.restoreEmbedded(ctx, backupName, false, dataOnly, tablesForRestore, partitionsNameList) } func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, log *apexLog.Entry) error { @@ -1148,7 +1148,7 @@ func (b *Backuper) changeTablePatternFromRestoreDatabaseMapping(tablePattern str return tablePattern } -func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, restoreOnlySchema bool, tablesForRestore ListOfTables, partitionsNameList map[metadata.TableTitle][]string) error { +func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, schemaOnly, dataOnly bool, tablesForRestore ListOfTables, partitionsNameList 
map[metadata.TableTitle][]string) error { tablesSQL := "" l := len(tablesForRestore) for i, t := range tablesForRestore { @@ -1182,9 +1182,12 @@ func (b *Backuper) restoreEmbedded(ctx context.Context, backupName string, resto } } var settings []string - if restoreOnlySchema { + if schemaOnly { settings = append(settings, "structure_only=1") } + if dataOnly { + settings = append(settings, "allow_non_empty_tables=1") + } if b.cfg.ClickHouse.EmbeddedRestoreThreads > 0 { settings = append(settings, fmt.Sprintf("restore_threads=%d", b.cfg.ClickHouse.EmbeddedRestoreThreads)) } From 12c2f137a584f957a623a1e1a66f5c9c21bda6c8 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 00:01:34 +0300 Subject: [PATCH 20/80] added support for `--env ENV_NAME=value` cli parameter to allow dynamically overriding any config parameter, fix https://github.com/Altinity/clickhouse-backup/issues/821 --- ChangeLog.md | 1 + cmd/clickhouse-backup/main.go | 14 ++++++++++---- pkg/config/config.go | 17 ++++++++++++++++- test/integration/integration_test.go | 5 +++-- .../docker-compose/clickhouse-service.yml | 2 ++ .../docker-compose/docker-compose.yml | 2 ++ .../tests/snapshots/cli.py.cli.snapshot | 4 ++-- 7 files changed, 36 insertions(+), 9 deletions(-) diff --git a/ChangeLog.md index 62d863ee..7c370166 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,5 +1,6 @@ # v2.5.0 (not released yet) IMPROVEMENTS +- added support for `--env ENV_NAME=value` cli parameter to allow dynamically overriding any config parameter, fix [821](https://github.com/Altinity/clickhouse-backup/issues/821) - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) - `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` -- `--data` for `restore` with `use_embedded_backup_restore: true` will use `allow_non_empty_tables=true` to allow fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) diff --git a/cmd/clickhouse-backup/main.go index 5e8d951c..3cd9d2fa 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -37,10 +37,16 @@ func main() { } cliapp.Flags = []cli.Flag{ cli.StringFlag{ - Name: "config, c", - Value: config.DefaultConfigPath, - Usage: "Config 'FILE' name.", - EnvVar: "CLICKHOUSE_BACKUP_CONFIG", + Name: "config, c", + Value: config.DefaultConfigPath, + Usage: "Config 'FILE' name.", + EnvVar: "CLICKHOUSE_BACKUP_CONFIG", + Required: false, + }, + cli.StringSliceFlag{ + Name: "environment-override, env", + Usage: "override any environment variable via CLI parameter", + Required: false, }, cli.IntFlag{ Name: "command-id", diff --git a/pkg/config/config.go index e72fc2b0..a3c5688d 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -304,7 +304,6 @@ func (cfg *Config) GetCompressionFormat() string { var freezeByPartBeginAndRE = regexp.MustCompile(`(?im)^\s*AND\s+`) - // LoadConfig - load config from file + environment variables func LoadConfig(configLocation string) (*Config, error) { cfg := DefaultConfig() @@ -617,6 +616,7 @@ func DefaultConfig() *Config { } func GetConfigFromCli(ctx *cli.Context) *Config { + OverrideEnvVars(ctx) configPath := GetConfigPath(ctx) cfg, err := LoadConfig(configPath) if err != nil { @@ -637,3 +637,18 @@ func GetConfigPath(ctx *cli.Context) string { } return DefaultConfigPath } + +func 
OverrideEnvVars(ctx *cli.Context) { + env := ctx.StringSlice("env") + if len(env) > 0 { + for _, v := range env { + envVariable := strings.SplitN(v, "=", 2) + if len(envVariable) < 2 { + envVariable = append(envVariable, "true") + } + if err := os.Setenv(envVariable[0], envVariable[1]); err != nil { + log.Warnf("can't override %s=%s, error: %v", envVariable[0], envVariable[1], err) + } + } + } +} diff --git a/test/integration/integration_test.go index 865025a1..cb0d050c 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -748,7 +748,8 @@ func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *Tes } } func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote actions_backup1"}, true) + // STANDARD_IA for https://github.com/Altinity/clickhouse-backup/issues/821 + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote --env S3_STORAGE_CLASS=STANDARD_IA actions_backup1"}, true) runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) @@ -1172,7 +1173,7 @@ func TestTablePatterns(t *testing.T) { fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") generateTestData(t, r, ch, "S3", defaultTestData) if createPattern { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--env", "S3_STORAGE_CLASS=STANDARD_IA", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) } else { r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) } diff --git a/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml index b74f865a..ea8d0aac 100644 --- a/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml +++ b/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml @@ -22,7 +22,9 @@ services: start_period: 2s environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-23.3} + CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS: "true" cap_add: - SYS_PTRACE + - SYS_NICE security_opt: - label:disable diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index 74cbbc53..ef32272e 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -138,6 +138,8 @@ services: timeout: 5s retries: 40 start_period: 10s + cap_add: + - SYS_NICE depends_on: clickhouse1: condition: service_healthy diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index 08ae0c79..b9b8294e 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,6 
+1,6 @@ default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', 
\' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" -help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'""" +help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --environment-override value, --env value override any environment variable via CLI parameter\n --help, -h show help\n --version, -v print the version'""" -cli_usage = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'""" +cli_usage = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --environment-override value, --env value override any environment variable via CLI parameter\n --help, -h show help\n --version, -v print the version'""" From 1369de08827e6951fb78b1f46eeab197360613c4 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 07:38:44 +0300 Subject: [PATCH 21/80] add integration_test.go command related to https://github.com/Altinity/clickhouse-backup/issues/821 --- test/integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index cb0d050c..10c949d2 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1173,6 +1173,7 @@ func TestTablePatterns(t *testing.T) { fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") generateTestData(t, r, ch, "S3", defaultTestData) if createPattern { + // --env for check corner cases https://github.com/Altinity/clickhouse-backup/issues/821 r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--env", "S3_STORAGE_CLASS=STANDARD_IA", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) } else { r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) From 9a764622c206cfe6f930c97542225091a7948fac Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 08:32:42 +0300 Subject: [PATCH 22/80] WHY backwards incompatibility was back ported from 23.10 to 23.3 and 23.8, https://github.com/ClickHouse/ClickHouse/pull/59808, but not backport https://github.com/ClickHouse/ClickHouse/pull/59808? 
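The `--environment-override value, --env value` flag added to the CLI help above lets any config setting be overridden per invocation (as in the `--env S3_STORAGE_CLASS=STANDARD_IA` test call). A minimal runnable sketch of that behaviour, mirroring `OverrideEnvVars` in pkg/config/config.go as patched in a later commit; the package and function names here are illustrative only:

package main

import (
	"log"
	"os"
	"strings"
)

// applyEnvOverrides splits each "NAME=value" on the first '=' only (so values
// may themselves contain '='), defaults to "true" when no value is given,
// and exports the variable so envconfig-driven settings pick it up.
func applyEnvOverrides(overrides []string) {
	for _, kv := range overrides {
		pair := strings.SplitN(kv, "=", 2)
		if len(pair) < 2 {
			pair = append(pair, "true")
		}
		log.Printf("override %s=%s", pair[0], pair[1])
		if err := os.Setenv(pair[0], pair[1]); err != nil {
			log.Printf("can't override %s=%s, error: %v", pair[0], pair[1], err)
		}
	}
}

func main() {
	applyEnvOverrides([]string{"S3_STORAGE_CLASS=STANDARD_IA", "ALLOW_EMPTY_BACKUPS"})
}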
--- .../docker-compose/clickhouse-service.yml | 7 +++++++ .../docker-compose/custom_entrypoint.sh | 12 ++++++++++++ 2 files changed, 19 insertions(+) create mode 100755 test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh diff --git a/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml b/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml index ea8d0aac..5c9f9ba4 100644 --- a/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml +++ b/test/testflows/clickhouse_backup/docker-compose/clickhouse-service.yml @@ -4,6 +4,7 @@ services: clickhouse: image: ${CLICKHOUSE_IMAGE:-clickhouse/clickhouse-server}:${CLICKHOUSE_VERSION:-23.3} volumes: + - "${CLICKHOUSE_TESTS_DIR}/docker-compose/custom_entrypoint.sh:/custom_entrypoint.sh" - "${CLICKHOUSE_TESTS_DIR}/docker-compose/dynamic_settings.sh:/docker-entrypoint-initdb.d/dynamic_settings.sh" - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl" - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/common.xml:/etc/clickhouse-server/config.d/common.xml" @@ -20,6 +21,12 @@ services: timeout: 2s retries: 40 start_period: 2s + # to avoid backward incompatibility ;( + # https://t.me/clickhouse_ru/359960 + # https://t.me/clickhouse_ru/359968 + # https://t.me/clickhouse_ru/362378 + entrypoint: + - "/custom_entrypoint.sh" environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-23.3} CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS: "true" diff --git a/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh b/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh new file mode 100755 index 00000000..3a0f5c7f --- /dev/null +++ b/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# to avoid backward incompatibility ;( +# https://t.me/clickhouse_ru/359960 +# https://t.me/clickhouse_ru/359968 +# https://t.me/clickhouse_ru/362378 + +if [ $# -ne 0 ]; then + /entrypoint.sh "$@" +else + /docker-entrypoint-initdb.d/dynamic_settings.sh + /entrypoint.sh +fi From 2b09d7df0385d921f8de213c74d9083786f1f8e6 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 08:46:41 +0300 Subject: [PATCH 23/80] WHY backwards incompatibility was back ported from 23.10 to 23.3 and 23.8, https://github.com/ClickHouse/ClickHouse/pull/59808, but not backport https://github.com/ClickHouse/ClickHouse/pull/59808? 
--- .../clickhouse_backup/docker-compose/custom_entrypoint.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh b/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh index 3a0f5c7f..89bafb51 100755 --- a/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh +++ b/test/testflows/clickhouse_backup/docker-compose/custom_entrypoint.sh @@ -7,6 +7,8 @@ if [ $# -ne 0 ]; then /entrypoint.sh "$@" else - /docker-entrypoint-initdb.d/dynamic_settings.sh + for script in /docker-entrypoint-initdb.d/*.sh; do + $script + done /entrypoint.sh fi From d550235e8a9fb55356763323af34d24534487b7e Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 09:53:44 +0300 Subject: [PATCH 24/80] upgrade zookepeer 3.8.4, added support for `--env ENV_NAME=value` cli parameter for allow dynamically override any config parameter, fix https://github.com/Altinity/clickhouse-backup/issues/821 --- pkg/config/config.go | 5 +++-- test/integration/docker-compose.yml | 2 +- .../clickhouse_backup/docker-compose/docker-compose.yml | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index a3c5688d..fd697995 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -642,10 +642,11 @@ func OverrideEnvVars(ctx *cli.Context) { env := ctx.StringSlice("env") if len(env) > 0 { for _, v := range env { - envVariable := strings.SplitN(v, "=", 1) + envVariable := strings.SplitN(v, "=", 2) if len(envVariable) < 2 { - envVariable[1] = "true" + envVariable = append(envVariable, "true") } + log.Infof("override %s=%s", envVariable[0], envVariable[1]) if err := os.Setenv(envVariable[0], envVariable[1]); err != nil { log.Warnf("can't override %s=%s, error: %v", envVariable[0], envVariable[1], err) } diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index c28ccd5d..40083dc3 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -88,7 +88,7 @@ services: zookeeper: # @TODO back :latest default value after resolve https://github.com/ClickHouse/ClickHouse/issues/53749 - image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.3} + image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.4} hostname: zookeeper environment: ZOO_4LW_COMMANDS_WHITELIST: "*" diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index ef32272e..3cfdfa51 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -3,7 +3,7 @@ version: '2.4' services: zookeeper: # @TODO back :latest default value after resolve https://github.com/ClickHouse/ClickHouse/issues/53749 - image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.3} + image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.4} expose: - "2181" environment: From 01f878a4f68d96df3d90da4135ba106b4e1c0fdf Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 6 Mar 2024 10:04:00 +0300 Subject: [PATCH 25/80] remove --env for check corner cases https://github.com/Altinity/clickhouse-backup/issues/821 --- test/integration/integration_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 10c949d2..865025a1 100644 --- 
a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -748,8 +748,7 @@ func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *Tes } } func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { - // STANDARD_IA for https://github.com/Altinity/clickhouse-backup/issues/821 - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote --env S3_STORAGE_CLASS=STANDARD_IA actions_backup1"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote actions_backup1"}, true) runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) @@ -1173,8 +1172,7 @@ func TestTablePatterns(t *testing.T) { fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") generateTestData(t, r, ch, "S3", defaultTestData) if createPattern { - // --env for check corner cases https://github.com/Altinity/clickhouse-backup/issues/821 - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--env", "S3_STORAGE_CLASS=STANDARD_IA", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) } else { r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) } From e10c636ace15babbf2089dc8e85ad324b5fecf50 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 7 Mar 2024 09:21:42 +0300 Subject: [PATCH 26/80] add --env to TestConfis, rollback zookeeper:3.8.3 --- test/integration/docker-compose.yml | 2 +- test/integration/integration_test.go | 2 +- .../clickhouse_backup/docker-compose/docker-compose.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 40083dc3..c28ccd5d 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -88,7 +88,7 @@ services: zookeeper: # @TODO back :latest default value after resolve https://github.com/ClickHouse/ClickHouse/issues/53749 - image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.4} + image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.3} hostname: zookeeper environment: ZOO_4LW_COMMANDS_WHITELIST: "*" diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 865025a1..ab14867f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -566,7 +566,7 @@ func TestConfigs(t *testing.T) { r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup")) ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "clickhouse-backup upload --env CLICKHOUSE_BACKUP_CONFIG="+config+" --env S3_COMPRESSION_FORMAT=none --env ALLOW_EMPTY_BACKUPS=1 
test_configs_backup")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index 3cfdfa51..ef32272e 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -3,7 +3,7 @@ version: '2.4' services: zookeeper: # @TODO back :latest default value after resolve https://github.com/ClickHouse/ClickHouse/issues/53749 - image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.4} + image: ${ZOOKEEPER_IMAGE:-docker.io/zookeeper}:${ZOOKEEPER_VERSION:-3.8.3} expose: - "2181" environment: From a8d8cbc22356726caf63d23821bb7b1f6269269f Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 19 Mar 2024 10:23:21 +0400 Subject: [PATCH 27/80] rename embedded-base-backup to diff-from-remote in `create` command --- cmd/clickhouse-backup/main.go | 6 +++--- pkg/backup/create.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 3cd9d2fa..c55639f1 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -96,7 +96,7 @@ func main() { Description: "Create new backup", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateBackup(c.Args().First(), c.String("t"), c.String("embbedded-base-backup"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateBackup(c.Args().First(), c.String("t"), c.String("diff-from-remote"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -105,9 +105,9 @@ func main() { Usage: "Create backup only matched with table name patterns, separated by comma, allow ? 
and * as wildcard", }, cli.StringFlag{ - Name: "embbedded-base-backup", + Name: "diff-from-remote", Hidden: false, - Usage: "Create incremental embedded backup based on other backup name", + Usage: "Create incremental embedded backup or upload incremental object disk data based on other remote backup name", }, cli.StringSliceFlag{ Name: "partitions", diff --git a/pkg/backup/create.go b/pkg/backup/create.go index deabd129..6f0e09b0 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -56,7 +56,7 @@ func NewBackupName() string { // CreateBackup - create new backup of all tables matched by tablePattern // If backupName is empty string will use default backup name -func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { +func (b *Backuper) CreateBackup(backupName, tablePattern, diffFromRemote string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -126,7 +126,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, embeddedBaseBackup str backupRBACSize, backupConfigSize := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, doBackupData, schemaOnly, version, tablePattern, embeddedBaseBackup, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) + err = b.createBackupEmbedded(ctx, backupName, doBackupData, schemaOnly, version, tablePattern, diffFromRemote, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) } else { err = b.createBackupLocal(ctx, backupName, doBackupData, schemaOnly, rbacOnly, configsOnly, version, partitionsIdMap, tables, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, log, startBackup) } From 8b6a92d8cbdce5ac78fd5aff73718bf374afbfba Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 23 Mar 2024 21:18:23 +0400 Subject: [PATCH 28/80] [WIP] create/restore increment backup shall download create parts with recusive required backup part search --- ChangeLog.md | 2 + cmd/clickhouse-backup/main.go | 2 +- pkg/backup/backuper.go | 59 ++++++++++++++++++++ pkg/backup/create.go | 50 +++++++++++------ pkg/backup/create_remote.go | 2 +- pkg/backup/download.go | 7 +-- pkg/backup/restore.go | 42 +++++++++----- pkg/backup/upload.go | 70 ++---------------------- pkg/clickhouse/clickhouse.go | 8 +++ pkg/filesystemhelper/filesystemhelper.go | 34 +++++++++++- pkg/server/server.go | 8 +-- test/integration/integration_test.go | 2 +- 12 files changed, 179 insertions(+), 107 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index e7ec63fd..fc5c0fc1 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -4,6 +4,7 @@ IMPROVEMENTS - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) - `--rbac, --rbac-only, --configs, --configs-only` now works with 
`use_embedded_backup_restore: true` -- `--data` for `restore` with `use_embedded_backup_restore: true` will use `allow_non_empty_tables=true` to allow fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) +- added `--diff-from-remote` parameter for `create` command, will copy only new data parts object disk data, also allows to download properly object disk data from required backup during `restore`, fix [865](https://github.com/Altinity/clickhouse-backup/issues/865) BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 @@ -11,6 +12,7 @@ BUG FIXES - restore SQL UDF functions after restore tables - execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) - fix wrong behavior for `freeze_by_part` + `freeze_by_part_where`, fix [855](https://github.com/Altinity/clickhouse-backup/issues/855) +- apply `CLICKHOUSE_SKIP_TABLES_ENGINES` during `create` command # v2.4.34 BUG FIXES diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index c55639f1..41ea7999 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -96,7 +96,7 @@ func main() { Description: "Create new backup", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateBackup(c.Args().First(), c.String("t"), c.String("diff-from-remote"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateBackup(c.Args().First(), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 715f16fd..9cae517f 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" "net/url" "os" "path" @@ -305,3 +306,61 @@ func (b *Backuper) getObjectDiskPath() (string, error) { return "", fmt.Errorf("cleanBackupObjectDisks: requesst object disks path but have unsupported remote_storage: %s", b.cfg.General.RemoteStorage) } } + +func (b *Backuper) getTablesDiffFromLocal(ctx context.Context, diffFrom string, tablePattern string) (tablesForUploadFromDiff map[metadata.TableTitle]metadata.TableMetadata, err error) { + tablesForUploadFromDiff = make(map[metadata.TableTitle]metadata.TableMetadata) + diffFromBackup, err := b.ReadBackupMetadataLocal(ctx, diffFrom) + if err != nil { + return nil, err + } + if len(diffFromBackup.Tables) != 0 { + metadataPath := path.Join(b.DefaultDataPath, "backup", diffFrom, "metadata") + // empty partitions, because we don't want filter + diffTablesList, _, err := b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, false, []string{}) + if err != nil { + return nil, err + } + for _, t := range diffTablesList { + tablesForUploadFromDiff[metadata.TableTitle{ + Database: t.Database, + Table: t.Table, + }] = t + } + } + return tablesForUploadFromDiff, nil +} + +func (b *Backuper) getTablesDiffFromRemote(ctx context.Context, diffFromRemote string, tablePattern string) 
(tablesForUploadFromDiff map[metadata.TableTitle]metadata.TableMetadata, err error) { + tablesForUploadFromDiff = make(map[metadata.TableTitle]metadata.TableMetadata) + backupList, err := b.dst.BackupList(ctx, true, diffFromRemote) + if err != nil { + return nil, err + } + var diffRemoteMetadata *metadata.BackupMetadata + for _, backup := range backupList { + if backup.BackupName == diffFromRemote { + if backup.Legacy { + return nil, fmt.Errorf("%s have legacy format and can't be used as diff-from-remote source", diffFromRemote) + } + diffRemoteMetadata = &backup.BackupMetadata + break + } + } + if diffRemoteMetadata == nil { + return nil, fmt.Errorf("%s not found on remote storage", diffFromRemote) + } + + if len(diffRemoteMetadata.Tables) != 0 { + diffTablesList, err := getTableListByPatternRemote(ctx, b, diffRemoteMetadata, tablePattern, false) + if err != nil { + return nil, err + } + for _, t := range diffTablesList { + tablesForUploadFromDiff[metadata.TableTitle{ + Database: t.Database, + Table: t.Table, + }] = t + } + } + return tablesForUploadFromDiff, nil +} diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 6f0e09b0..93645a43 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -56,7 +56,7 @@ func NewBackupName() string { // CreateBackup - create new backup of all tables matched by tablePattern // If backupName is empty string will use default backup name -func (b *Backuper) CreateBackup(backupName, tablePattern, diffFromRemote string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { +func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -126,9 +126,9 @@ func (b *Backuper) CreateBackup(backupName, tablePattern, diffFromRemote string, backupRBACSize, backupConfigSize := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, doBackupData, schemaOnly, version, tablePattern, diffFromRemote, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) + err = b.createBackupEmbedded(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, version, tablePattern, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) } else { - err = b.createBackupLocal(ctx, backupName, doBackupData, schemaOnly, rbacOnly, configsOnly, version, partitionsIdMap, tables, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, log, startBackup) + err = b.createBackupLocal(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, rbacOnly, configsOnly, version, partitionsIdMap, tables, tablePattern, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, log, startBackup) } if err != nil { // delete local backup if can't create @@ -177,7 +177,7 @@ func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupNa return backupRBACSize, backupConfigSize } -func (b *Backuper) 
createBackupLocal(ctx context.Context, backupName string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -208,7 +208,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, doB } } } - if isObjectDiskContainsTables { + if isObjectDiskContainsTables || diffFromRemote != "" { var err error if err = config.ValidateObjectDiskConfig(b.cfg); err != nil { return err @@ -226,6 +226,15 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, doB } }() } + var tablesDiffFromRemote map[metadata.TableTitle]metadata.TableMetadata + if diffFromRemote != "" { + var diffFromRemoteErr error + tablesDiffFromRemote, diffFromRemoteErr = b.getTablesDiffFromRemote(ctx, diffFromRemote, tablePattern) + if diffFromRemoteErr != nil { + return fmt.Errorf("b.getTablesDiffFromRemote return error: %v", diffFromRemoteErr) + } + } + var backupDataSize, backupMetadataSize uint64 var metaMutex sync.Mutex createBackupWorkingGroup, createCtx := errgroup.WithContext(ctx) @@ -246,9 +255,9 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, doB log.Debug("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") var addTableToBackupErr error - disksToPartsMap, realSize, addTableToBackupErr = b.AddTableToBackup(createCtx, backupName, shadowBackupUUID, disks, &table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) + disksToPartsMap, realSize, addTableToBackupErr = b.AddTableToLocalBackup(createCtx, backupName, tablesDiffFromRemote, shadowBackupUUID, disks, &table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) if addTableToBackupErr != nil { - log.Errorf("b.AddTableToBackup error: %v", addTableToBackupErr) + log.Errorf("b.AddTableToLocalBackup error: %v", addTableToBackupErr) return addTableToBackupErr } // more precise data size calculation @@ -300,14 +309,14 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, doB } backupMetaFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, diffFromRemote, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupMetadataSize, backupRBACSize, 
backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { return fmt.Errorf("createBackupMetadata return error: %v", err) } log.WithField("duration", utils.HumanizeDuration(time.Since(startBackup))).Info("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName string, doBackupData, schemaOnly bool, backupVersion, tablePattern, baseBackup string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBackup string, doBackupData, schemaOnly bool, backupVersion, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { // TODO: Implement sharded backup operations for embedded backups if doesShard(b.cfg.General.ShardedOperationMode) { return fmt.Errorf("cannot perform embedded backup: %w", errShardOperationUnsupported) @@ -431,7 +440,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName string, } } backupMetaFile := path.Join(backupPath, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, backupMetadataSize, backupRBACSize, backupConfigSize, tablesTitle, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, baseBackup, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, backupMetadataSize, backupRBACSize, backupConfigSize, tablesTitle, allDatabases, allFunctions, log); err != nil { return err } @@ -654,7 +663,7 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st return rbacDataSize, nil } -func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, map[string]int64, error) { +func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, tablesDiffFromRemote map[metadata.TableTitle]metadata.TableMetadata, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, map[string]int64, error) { log := b.log.WithFields(apexLog.Fields{ "backup": backupName, "operation": "create", @@ -703,7 +712,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku return nil, nil, err } // If partitionsIdsMap is not empty, only parts in this partition will back up. 
- parts, size, err := filesystemhelper.MoveShadow(shadowPath, backupShadowPath, partitionsIdsMap, version) + parts, size, err := filesystemhelper.MoveShadowToBackup(shadowPath, backupShadowPath, partitionsIdsMap, tablesDiffFromRemote[metadata.TableTitle{Database: table.Database, Table: table.Name}], disk, version) if err != nil { return nil, nil, err } @@ -712,7 +721,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku log.WithField("disk", disk.Name).Debug("shadow moved") if len(parts) > 0 && (b.isDiskTypeObject(disk.Type) || b.isDiskTypeEncryptedObject(disk, diskList)) { start := time.Now() - if size, err = b.uploadObjectDiskParts(ctx, backupName, backupShadowPath, disk); err != nil { + if size, err = b.uploadObjectDiskParts(ctx, backupName, tablesDiffFromRemote[metadata.TableTitle{Database: table.Database, Table: table.Name}], backupShadowPath, disk); err != nil { return disksToPartsMap, realSize, err } realSize[disk.Name] += size @@ -741,7 +750,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku return disksToPartsMap, realSize, nil } -func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backupShadowPath string, disk clickhouse.Disk) (int64, error) { +func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string, tableDiffFromRemote metadata.TableMetadata, backupShadowPath string, disk clickhouse.Disk) (int64, error) { var size int64 var err error if err = object_disk.InitCredentialsAndConnections(ctx, b.ch, b.cfg, disk.Name); err != nil { @@ -768,7 +777,15 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backup return nil } var realSize, objSize int64 - + // upload only not required parts, https://github.com/Altinity/clickhouse-backup/issues/865 + if tableDiffFromRemote.Database != "" && tableDiffFromRemote.Table != "" && len(tableDiffFromRemote.Parts[disk.Name]) > 0 { + partPaths := strings.SplitN(strings.TrimPrefix(fPath, backupShadowPath), "/", 2) + for _, part := range tableDiffFromRemote.Parts[disk.Name] { + if part.Name == partPaths[0] { + return nil + } + } + } uploadObjectDiskPartsWorkingGroup.Go(func() error { objPartFileMeta, readMetadataErr := object_disk.ReadMetadataFromFile(fPath) if readMetadataErr != nil { @@ -808,13 +825,14 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backup return size, nil } -func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry) error { +func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, requiredBackup, version, tags string, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry) error { select { case <-ctx.Done(): return ctx.Err() default: backupMetadata := metadata.BackupMetadata{ BackupName: backupName, + RequiredBackup: requiredBackup, Disks: diskMap, DiskTypes: diskTypes, ClickhouseBackupVersion: version, diff --git a/pkg/backup/create_remote.go b/pkg/backup/create_remote.go index 04da9379..40debb62 100644 --- 
a/pkg/backup/create_remote.go +++ b/pkg/backup/create_remote.go @@ -15,7 +15,7 @@ func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePat if backupName == "" { backupName = NewBackupName() } - if err := b.CreateBackup(backupName, tablePattern, diffFromRemote, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { + if err := b.CreateBackup(backupName, diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { return err } if err := b.Upload(backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 5c7637f9..0160597c 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -265,7 +265,6 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ backupMetadata.CompressedSize = 0 backupMetadata.DataFormat = "" - backupMetadata.RequiredBackup = "" backupMetadata.ConfigSize = configSize backupMetadata.RBACSize = rbacSize @@ -737,7 +736,7 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. return fmt.Errorf("after downloadDiffRemoteFile %s exists but is not directory", downloadedPartPath) } if err = b.makePartHardlinks(downloadedPartPath, existsPath); err != nil { - return fmt.Errorf("can't to add link to exists part %s -> %s error: %v", newPath, existsPath, err) + return fmt.Errorf("can't to add link to exists part %s -> %s error: %v", downloadedPartPath, existsPath, err) } } if err != nil && !os.IsNotExist(err) { @@ -836,7 +835,7 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat } // recursive find if part in RequiredBackup also Required - tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, log, table, requiredTable, part, disk) + tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, table, requiredTable, part, disk, log) if found { return tableRemoteFiles, nil } @@ -877,7 +876,7 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat return nil, fmt.Errorf("%s.%s %s not found on %s and all required backups sequence", table.Database, table.Table, part.Name, requiredBackup.BackupName) } -func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, log *apexLog.Entry, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { +func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string, log *apexLog.Entry) (map[string]string, bool, error) { log.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Debugf("start") found := false for _, requiredParts := range requiredTable.Parts { diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 1e1a8ca5..14b12b46 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -203,7 +203,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if dataOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err := b.RestoreData(ctx, backupName, dataOnly, metadataPath, tablePattern, partitions, 
disks); err != nil { + if err := b.RestoreData(ctx, backupName, backupMetadata, dataOnly, metadataPath, tablePattern, partitions, disks); err != nil { return err } } @@ -812,7 +812,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci } // RestoreData - restore data for tables matched by tablePattern from backupName -func (b *Backuper) RestoreData(ctx context.Context, backupName string, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { +func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { startRestore := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, @@ -859,7 +859,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, dataOnly if b.isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, dataOnly, tablesForRestore, partitionsNameList) } else { - err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, diskTypes, disks, log) + err = b.restoreDataRegular(ctx, backupName, backupMetadata, tablePattern, tablesForRestore, diskMap, diskTypes, disks, log) } if err != nil { return err @@ -872,7 +872,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, d return b.restoreEmbedded(ctx, backupName, false, dataOnly, tablesForRestore, partitionsNameList) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, log *apexLog.Entry) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreDatabaseMapping(tablePattern) } @@ -915,11 +915,11 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta restoreBackupWorkingGroup.Go(func() error { // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { + if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { return restoreErr } } else { - if restoreErr := b.restoreDataRegularByParts(restoreCtx, backupName, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { + if restoreErr := b.restoreDataRegularByParts(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { return restoreErr } } @@ -939,12 +939,12 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes 
map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } log.Debug("data to 'storage' copied") - if err := b.downloadObjectDiskParts(ctx, backupName, table, diskMap, diskTypes, disks); err != nil { + if err := b.downloadObjectDiskParts(ctx, backupName, backupMetadata, table, diskMap, diskTypes, disks); err != nil { return fmt.Errorf("can't restore object_disk server-side copy data parts '%s.%s': %v", table.Database, table.Table, err) } if err := b.ch.AttachTable(ctx, table, dstTable); err != nil { @@ -953,12 +953,12 @@ func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName st return nil } -func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, true); err != nil { return fmt.Errorf("can't copy data to detached '%s.%s': %v", table.Database, table.Table, err) } log.Debug("data to 'detached' copied") - if err := b.downloadObjectDiskParts(ctx, backupName, table, diskMap, diskTypes, disks); err != nil { + if err := b.downloadObjectDiskParts(ctx, backupName, backupMetadata, table, diskMap, diskTypes, disks); err != nil { return fmt.Errorf("can't restore object_disk server-side copy data parts '%s.%s': %v", table.Database, table.Table, err) } if err := b.ch.AttachDataParts(table, dstTable); err != nil { @@ -967,7 +967,7 @@ func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName str return nil } -func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName string, backupTable metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk) error { +func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, backupTable metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk) error { log := apexLog.WithFields(apexLog.Fields{ "operation": "downloadObjectDiskParts", "table": fmt.Sprintf("%s.%s", backupTable.Database, backupTable.Table), @@ -1024,6 +1024,16 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin dstDiskName = part.RebalancedDisk } partPath := path.Join(diskMap[dstDiskName], "backup", backupName, "shadow", dbAndTableDir, dstDiskName, part.Name) + srcBackupName := backupName + srcDiskName := diskName + // copy from required backup for required data parts, https://github.com/Altinity/clickhouse-backup/issues/865 + if part.Required && backupMetadata.RequiredBackup != "" { + var findRecusiveErr error + srcBackupName, srcDiskName, findRecusiveErr = b.findObjectDiskPartRecursive(backupMetadata, backupTable, part, diskName) + if findRecusiveErr != nil { + return findRecusiveErr + } + } walkErr := filepath.Walk(partPath, func(fPath string, fInfo fs.FileInfo, err error) 
error { if err != nil { return err @@ -1059,13 +1069,13 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin } if b.cfg.General.RemoteStorage == "s3" && (diskType == "s3" || diskType == "encrypted") { srcBucket = b.cfg.S3.Bucket - srcKey = path.Join(b.cfg.S3.ObjectDiskPath, backupName, diskName, storageObject.ObjectRelativePath) + srcKey = path.Join(b.cfg.S3.ObjectDiskPath, srcBackupName, srcDiskName, storageObject.ObjectRelativePath) } else if b.cfg.General.RemoteStorage == "gcs" && (diskType == "s3" || diskType == "encrypted") { srcBucket = b.cfg.GCS.Bucket - srcKey = path.Join(b.cfg.GCS.ObjectDiskPath, backupName, diskName, storageObject.ObjectRelativePath) + srcKey = path.Join(b.cfg.GCS.ObjectDiskPath, srcBackupName, srcDiskName, storageObject.ObjectRelativePath) } else if b.cfg.General.RemoteStorage == "azblob" && (diskType == "azure_blob_storage" || diskType == "encrypted") { srcBucket = b.cfg.AzureBlob.Container - srcKey = path.Join(b.cfg.AzureBlob.ObjectDiskPath, backupName, diskName, storageObject.ObjectRelativePath) + srcKey = path.Join(b.cfg.AzureBlob.ObjectDiskPath, srcBackupName, srcDiskName, storageObject.ObjectRelativePath) } else { return fmt.Errorf("incompatible object_disk[%s].Type=%s amd remote_storage: %s", diskName, diskType, b.cfg.General.RemoteStorage) } @@ -1093,6 +1103,10 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin return nil } +func (b *Backuper) findObjectDiskPartRecursive(backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, part metadata.Part, name string) (string, string, error) { + return "", "", fmt.Errorf("not implemented") +} + func (b *Backuper) checkMissingTables(tablesForRestore ListOfTables, chTables []clickhouse.Table) []string { var missingTables []string for _, table := range tablesForRestore { diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index d9dc9f94..95ff0735 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -105,16 +105,18 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str tablesForUploadFromDiff := map[metadata.TableTitle]metadata.TableMetadata{} if diffFrom != "" && !b.isEmbedded { - tablesForUploadFromDiff, err = b.getTablesForUploadDiffLocal(ctx, diffFrom, backupMetadata, tablePattern) + tablesForUploadFromDiff, err = b.getTablesDiffFromLocal(ctx, diffFrom, tablePattern) if err != nil { - return fmt.Errorf("b.getTablesForUploadDiffLocal return error: %v", err) + return fmt.Errorf("b.getTablesDiffFromLocal return error: %v", err) } + backupMetadata.RequiredBackup = diffFrom } if diffFromRemote != "" && !b.isEmbedded { - tablesForUploadFromDiff, err = b.getTablesForUploadDiffRemote(ctx, diffFromRemote, backupMetadata, tablePattern) + tablesForUploadFromDiff, err = b.getTablesDiffFromRemote(ctx, diffFromRemote, tablePattern) if err != nil { - return fmt.Errorf("b.getTablesForUploadDiffRemote return error: %v", err) + return fmt.Errorf("b.getTablesDiffFromRemote return error: %v", err) } + backupMetadata.RequiredBackup = diffFromRemote } if b.resume { b.resumableState = resumable.NewState(b.DefaultDataPath, backupName, "upload", map[string]interface{}{ @@ -321,66 +323,6 @@ func (b *Backuper) prepareTableListToUpload(ctx context.Context, backupName stri return tablesForUpload, nil } -func (b *Backuper) getTablesForUploadDiffLocal(ctx context.Context, diffFrom string, backupMetadata *metadata.BackupMetadata, tablePattern string) (tablesForUploadFromDiff map[metadata.TableTitle]metadata.TableMetadata, 
err error) { - tablesForUploadFromDiff = make(map[metadata.TableTitle]metadata.TableMetadata) - diffFromBackup, err := b.ReadBackupMetadataLocal(ctx, diffFrom) - if err != nil { - return nil, err - } - if len(diffFromBackup.Tables) != 0 { - backupMetadata.RequiredBackup = diffFrom - metadataPath := path.Join(b.DefaultDataPath, "backup", diffFrom, "metadata") - // empty partitions, because we don't want filter - diffTablesList, _, err := b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, false, []string{}) - if err != nil { - return nil, err - } - for _, t := range diffTablesList { - tablesForUploadFromDiff[metadata.TableTitle{ - Database: t.Database, - Table: t.Table, - }] = t - } - } - return tablesForUploadFromDiff, nil -} - -func (b *Backuper) getTablesForUploadDiffRemote(ctx context.Context, diffFromRemote string, backupMetadata *metadata.BackupMetadata, tablePattern string) (tablesForUploadFromDiff map[metadata.TableTitle]metadata.TableMetadata, err error) { - tablesForUploadFromDiff = make(map[metadata.TableTitle]metadata.TableMetadata) - backupList, err := b.dst.BackupList(ctx, true, diffFromRemote) - if err != nil { - return nil, err - } - var diffRemoteMetadata *metadata.BackupMetadata - for _, backup := range backupList { - if backup.BackupName == diffFromRemote { - if backup.Legacy { - return nil, fmt.Errorf("%s have legacy format and can't be used as diff-from-remote source", diffFromRemote) - } - diffRemoteMetadata = &backup.BackupMetadata - break - } - } - if diffRemoteMetadata == nil { - return nil, fmt.Errorf("%s not found on remote storage", diffFromRemote) - } - - if len(diffRemoteMetadata.Tables) != 0 { - backupMetadata.RequiredBackup = diffFromRemote - diffTablesList, err := getTableListByPatternRemote(ctx, b, diffRemoteMetadata, tablePattern, false) - if err != nil { - return nil, err - } - for _, t := range diffTablesList { - tablesForUploadFromDiff[metadata.TableTitle{ - Database: t.Database, - Table: t.Table, - }] = t - } - } - return tablesForUploadFromDiff, nil -} - func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, diffFrom string, diffFromRemote string) error { log := b.log.WithField("logger", "validateUploadParams") if b.cfg.General.RemoteStorage == "none" { diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 923c2699..92a00b03 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -362,6 +362,14 @@ func (ch *ClickHouse) GetTables(ctx context.Context, tablePattern string) ([]Tab if ch.Config.UseEmbeddedBackupRestore && (strings.HasPrefix(t.Name, ".inner_id.") /*|| strings.HasPrefix(t.Name, ".inner.")*/) { t.Skip = true } + if len(ch.Config.SkipTableEngines) > 0 { + for _, engine := range ch.Config.SkipTableEngines { + if t.Engine == engine { + t.Skip = true + break + } + } + } if t.Skip { tables[i] = t continue diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 9081b4a1..1a5c5b24 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -229,8 +229,8 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa return ok } -func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, version int) ([]metadata.Part, int64, error) { - log := apexLog.WithField("logger", "MoveShadow") +func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, tableDiffFromRemote metadata.TableMetadata, disk 
clickhouse.Disk, version int) ([]metadata.Part, int64, error) { + log := apexLog.WithField("logger", "MoveShadowToBackup") size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { @@ -252,6 +252,13 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E if len(partitionsBackupMap) != 0 && !IsPartInPartition(pathParts[3], partitionsBackupMap) { return nil } + if tableDiffFromRemote.Database != "" && tableDiffFromRemote.Table != "" && len(tableDiffFromRemote.Parts) > 0 && len(tableDiffFromRemote.Parts[disk.Name]) > 0 { + var isRequiredPartAdded, partExists bool + parts, isRequiredPartAdded, partExists = addRequiredPartIfNotExists(parts, pathParts[3], tableDiffFromRemote, disk) + if isRequiredPartAdded || partExists { + return nil + } + } dstFilePath := filepath.Join(backupPartsPath, pathParts[3]) if info.IsDir() { if !strings.HasSuffix(pathParts[3], ".proj") { @@ -275,6 +282,29 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E return parts, size, err } +func addRequiredPartIfNotExists(parts []metadata.Part, relativePath string, tableDiffFromRemote metadata.TableMetadata, disk clickhouse.Disk) ([]metadata.Part, bool, bool) { + isRequiredPartAdded := false + exists := false + for _, p := range parts { + if p.Name == relativePath || strings.HasPrefix(relativePath, p.Name+"/") { + exists = true + break + } + } + if !exists { + for _, diffPart := range tableDiffFromRemote.Parts[disk.Name] { + if diffPart.Name == relativePath || strings.HasPrefix(relativePath, diffPart.Name+"/") { + parts = append(parts, metadata.Part{ + Name: relativePath, + Required: true, + }) + isRequiredPartAdded = true + } + } + } + return parts, isRequiredPartAdded, exists +} + func IsDuplicatedParts(part1, part2 string) error { log := apexLog.WithField("logger", "IsDuplicatedParts") p1, err := os.Open(part1) diff --git a/pkg/server/server.go b/pkg/server/server.go index f9ec0393..ff241d9a 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -806,7 +806,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) return } tablePattern := "" - embeddedBaseBackup := "" + diffFromRemote := "" partitionsToBackup := make([]string, 0) backupName := backup.NewBackupName() schemaOnly := false @@ -819,8 +819,8 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) tablePattern = tp[0] fullCommand = fmt.Sprintf("%s --tables=\"%s\"", fullCommand, tablePattern) } - if baseBackup, exists := query["embedded-base-backup"]; exists { - embeddedBaseBackup = baseBackup[0] + if baseBackup, exists := query["diff-from-remote"]; exists { + diffFromRemote = baseBackup[0] } if partitions, exist := query["partitions"]; exist { partitionsToBackup = strings.Split(partitions[0], ",") @@ -866,7 +866,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) go func() { err, _ := api.metrics.ExecuteWithMetrics("create", 0, func() error { b := backup.NewBackuper(cfg) - return b.CreateBackup(backupName, tablePattern, embeddedBaseBackup, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) + return b.CreateBackup(backupName, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { api.log.Errorf("API /backup/create error: 
%v", err) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 9d8900ff..5fc74068 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2296,7 +2296,7 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, ch *TestClickHous r.NoError(dockerExec("clickhouse", "cp", "-vf", origFile, dstFile)) } if isRebalanced { - r.NoError(dockerExec("clickhouse", "bash", "-xc", "cp -rfl /var/lib/clickhouse/disks/"+oldDisk+"/*", "/var/lib/clickhouse/disks/"+newDisk+"/")) + r.NoError(dockerExec("clickhouse", "bash", "-xc", "cp -aflv -t /var/lib/clickhouse/disks/"+newDisk+"/ /var/lib/clickhouse/disks/"+oldDisk+"/*")) r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/"+oldDisk+"")) } ch.chbackend.Close() From dd87132cda8206ba7d9c0f9b7d407c1af3735a40 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 25 Mar 2024 23:19:33 +0400 Subject: [PATCH 29/80] added `--diff-from-remote` parameter for `create` command, will copy only new data parts object disk data, also allows to download properly object disk data from required backup during `restore`, fix https://github.com/Altinity/clickhouse-backup/issues/865 --- pkg/backup/delete.go | 22 +++++++--- pkg/backup/download.go | 25 ++++++++--- pkg/backup/restore.go | 45 ++++++++++++++++---- pkg/filesystemhelper/filesystemhelper.go | 32 +++++++------- test/integration/docker-compose_advanced.yml | 2 +- 5 files changed, 89 insertions(+), 37 deletions(-) diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 9bc41944..b2fcfcee 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -179,9 +179,11 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx con return err } if !skip && (hasObjectDisks || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "")) { - if err = b.cleanBackupObjectDisks(ctx, backupName); err != nil { - log.Warnf("b.cleanBackupObjectDisks return error: %v", err) + if deletedKeys, deleteErr := b.cleanBackupObjectDisks(ctx, backupName); deleteErr != nil { + log.Warnf("b.cleanBackupObjectDisks return error: %v", deleteErr) return err + } else { + log.Infof("cleanBackupObjectDisks deleted %d keys", deletedKeys) } } if !skip && (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "") { @@ -335,8 +337,10 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx con return nil } if b.hasObjectDisksRemote(backup) || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "") { - if err = b.cleanBackupObjectDisks(ctx, backup.BackupName); err != nil { - log.Warnf("b.cleanBackupObjectDisks return error: %v", err) + if deletedKeys, deleteErr := b.cleanBackupObjectDisks(ctx, backup.BackupName); deleteErr != nil { + log.Warnf("b.cleanBackupObjectDisks return error: %v", deleteErr) + } else { + log.Infof("cleanBackupObjectDisks deleted %d keys", deletedKeys) } return nil } @@ -379,22 +383,26 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu } // cleanBackupObjectDisks - recursive delete / -func (b *Backuper) cleanBackupObjectDisks(ctx context.Context, backupName string) error { +func (b *Backuper) cleanBackupObjectDisks(ctx context.Context, backupName string) (uint, error) { objectDiskPath, err := b.getObjectDiskPath() if err != nil { - return err + return 0, err } //walk absolute path, delete relative - return b.dst.WalkAbsolute(ctx, path.Join(objectDiskPath, backupName), true, func(ctx context.Context, f storage.RemoteFile) error { + deletedKeys := 
uint(0) + walkErr := b.dst.WalkAbsolute(ctx, path.Join(objectDiskPath, backupName), true, func(ctx context.Context, f storage.RemoteFile) error { if b.dst.Kind() == "azblob" { if f.Size() > 0 || !f.LastModified().IsZero() { + deletedKeys += 1 return b.dst.DeleteFileFromObjectDiskBackup(ctx, path.Join(backupName, f.Name())) } else { return nil } } + deletedKeys += 1 return b.dst.DeleteFileFromObjectDiskBackup(ctx, path.Join(backupName, f.Name())) }) + return deletedKeys, walkErr } func (b *Backuper) skipIfSameLocalBackupPresent(ctx context.Context, backupName, tags string) (bool, error) { diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 0160597c..17a0efd2 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -223,7 +223,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ idx := i dataGroup.Go(func() error { start := time.Now() - if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, *tableMetadataAfterDownload[idx]); err != nil { + if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, *tableMetadataAfterDownload[idx], disks); err != nil { return err } log. @@ -287,6 +287,21 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ b.resumableState.Close() } + //clean partially downloaded requiredBackup + if remoteBackup.RequiredBackup != "" { + if localBackups, _, err = b.GetLocalBackups(ctx, disks); err == nil { + for _, localBackup := range localBackups { + if localBackup.BackupName != remoteBackup.BackupName && localBackup.DataSize+localBackup.CompressedSize+localBackup.MetadataSize == 0 { + if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, disks); err != nil { + return fmt.Errorf("downloadWithDiff -> RemoveBackupLocal cleaning error: %v", err) + } + } + } + } else { + return fmt.Errorf("downloadWithDiff -> GetLocalBackups cleaning error: %v", err) + } + } + log. WithField("duration", utils.HumanizeDuration(time.Since(startDownload))). WithField("size", utils.FormatBytes(dataSize+metadataSize+rbacSize+configSize)). @@ -567,7 +582,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st return uint64(remoteFileInfo.Size()), nil } -func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { +func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, disks []clickhouse.Disk) error { log := b.log.WithField("logger", "downloadTableData") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) ctx, cancel := context.WithCancel(ctx) @@ -664,8 +679,8 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. return fmt.Errorf("one of downloadTableData go-routine return error: %v", err) } - if !b.isEmbedded { - err := b.downloadDiffParts(ctx, remoteBackup, table, dbAndTableDir) + if !b.isEmbedded && remoteBackup.RequiredBackup != "" { + err := b.downloadDiffParts(ctx, remoteBackup, table, dbAndTableDir, disks) if err != nil { return err } @@ -674,7 +689,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
return nil } -func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { +func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string, disks []clickhouse.Disk) error { log := b.log.WithField("operation", "downloadDiffParts") log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("start") start := time.Now() diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 14b12b46..9af61615 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -160,8 +160,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return nil } } - - if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { + isObjectDiskPresents := false + for _, d := range disks { + if isObjectDiskPresents = b.isDiskTypeObject(d.Type); isObjectDiskPresents { + break + } + } + if (b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "") || isObjectDiskPresents { if b.dst, err = storage.NewBackupDestination(ctx, b.cfg, b.ch, false, backupName); err != nil { return err } @@ -1028,10 +1033,10 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin srcDiskName := diskName // copy from required backup for required data parts, https://github.com/Altinity/clickhouse-backup/issues/865 if part.Required && backupMetadata.RequiredBackup != "" { - var findRecusiveErr error - srcBackupName, srcDiskName, findRecusiveErr = b.findObjectDiskPartRecursive(backupMetadata, backupTable, part, diskName) - if findRecusiveErr != nil { - return findRecusiveErr + var findRecursiveErr error + srcBackupName, srcDiskName, findRecursiveErr = b.findObjectDiskPartRecursive(ctx, backupMetadata, backupTable, part, diskName, log) + if findRecursiveErr != nil { + return findRecursiveErr } } walkErr := filepath.Walk(partPath, func(fPath string, fInfo fs.FileInfo, err error) error { @@ -1103,8 +1108,32 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin return nil } -func (b *Backuper) findObjectDiskPartRecursive(backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, part metadata.Part, name string) (string, string, error) { - return "", "", fmt.Errorf("not implemented") +func (b *Backuper) findObjectDiskPartRecursive(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, part metadata.Part, diskName string, log *apexLog.Entry) (string, string, error) { + if !part.Required { + return backup.BackupName, diskName, nil + } + if part.Required && backup.RequiredBackup == "" { + return "", "", fmt.Errorf("part %s have required flag, in %s but backup.RequiredBackup is empty", part.Name, backup.BackupName) + } + requiredBackup, err := b.ReadBackupMetadataRemote(ctx, backup.RequiredBackup) + if err != nil { + return "", "", err + } + var requiredTable *metadata.TableMetadata + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, log, metadata.TableTitle{Database: table.Database, Table: table.Table}) + // @todo think about add check what if disk type could changed (should already restricted, cause upload seek part in the same disk name) + for requiredDiskName, parts := range requiredTable.Parts { + for _, requiredPart := range parts { + if requiredPart.Name == part.Name { + if requiredPart.Required { + return 
b.findObjectDiskPartRecursive(ctx, *requiredBackup, *requiredTable, requiredPart, requiredDiskName, log) + } + return requiredBackup.BackupName, requiredDiskName, nil + } + } + + } + return "", "", fmt.Errorf("part %s have required flag in %s, but not found in %s", part.Name, backup.BackupName, backup.RequiredBackup) } func (b *Backuper) checkMissingTables(tablesForRestore ListOfTables, chTables []clickhouse.Table) []string { diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 1a5c5b24..40b32ddf 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -252,16 +252,16 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap if len(partitionsBackupMap) != 0 && !IsPartInPartition(pathParts[3], partitionsBackupMap) { return nil } + var isRequiredPartFound, partExists bool if tableDiffFromRemote.Database != "" && tableDiffFromRemote.Table != "" && len(tableDiffFromRemote.Parts) > 0 && len(tableDiffFromRemote.Parts[disk.Name]) > 0 { - var isRequiredPartAdded, partExists bool - parts, isRequiredPartAdded, partExists = addRequiredPartIfNotExists(parts, pathParts[3], tableDiffFromRemote, disk) - if isRequiredPartAdded || partExists { + parts, isRequiredPartFound, partExists = addRequiredPartIfNotExists(parts, pathParts[3], tableDiffFromRemote, disk) + if isRequiredPartFound { return nil } } dstFilePath := filepath.Join(backupPartsPath, pathParts[3]) if info.IsDir() { - if !strings.HasSuffix(pathParts[3], ".proj") { + if !strings.HasSuffix(pathParts[3], ".proj") && !isRequiredPartFound && !partExists { parts = append(parts, metadata.Part{ Name: pathParts[3], }) @@ -283,26 +283,26 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap } func addRequiredPartIfNotExists(parts []metadata.Part, relativePath string, tableDiffFromRemote metadata.TableMetadata, disk clickhouse.Disk) ([]metadata.Part, bool, bool) { - isRequiredPartAdded := false + isRequiredPartFound := false exists := false - for _, p := range parts { - if p.Name == relativePath || strings.HasPrefix(relativePath, p.Name+"/") { - exists = true - break - } - } - if !exists { - for _, diffPart := range tableDiffFromRemote.Parts[disk.Name] { - if diffPart.Name == relativePath || strings.HasPrefix(relativePath, diffPart.Name+"/") { + for _, diffPart := range tableDiffFromRemote.Parts[disk.Name] { + if diffPart.Name == relativePath || strings.HasPrefix(relativePath, diffPart.Name+"/") { + for _, p := range parts { + if p.Name == relativePath || strings.HasPrefix(relativePath, p.Name+"/") { + exists = true + break + } + } + if !exists { parts = append(parts, metadata.Part{ Name: relativePath, Required: true, }) - isRequiredPartAdded = true } + isRequiredPartFound = true } } - return parts, isRequiredPartAdded, exists + return parts, isRequiredPartFound, exists } func IsDuplicatedParts(part1, part2 string) error { diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 0f94e340..767ee544 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -294,7 +294,7 @@ services: - "8123:8123" - "9000:9000" # for delve debugger -# - "40001:40001" + - "40002:40002" networks: - clickhouse-backup links: From eaead75e33b3f54efee9aec6a214bf4b9a28da4a Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 06:43:20 +0400 Subject: [PATCH 30/80] fix TestKeepBackupRemoteAndDiffFromRemote Signed-off-by: 
Slach --- test/integration/integration_test.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 5fc74068..449449ad 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1435,11 +1435,24 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { } latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup)) + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") + r.NoError(err) + prevIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-2) + for _, backupName := range backupNames { + if backupName == latestIncrementBackup { + r.Contains(out, backupName) + } else if backupName == prevIncrementBackup { + r.Contains(out, "+"+backupName) + } else { + r.NotContains(out, backupName) + } + } r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)) var res uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name()))) r.Equal(uint64(200), res) - fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml") + fullCleanup(t, r, ch, []string{latestIncrementBackup}, []string{"local"}, nil, true, true, "config-s3.yml") + fullCleanup(t, r, ch, backupNames, []string{"remote"}, databaseList, true, true, "config-s3.yml") checkObjectStorageIsEmpty(t, r, "S3") } From f19ec418219f2d82ee5e38b44813ab81d9589ca6 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 11:01:33 +0400 Subject: [PATCH 31/80] fix TestProjections Signed-off-by: Slach --- test/integration/integration_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 449449ad..8a477c0a 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1346,14 +1346,17 @@ func TestProjections(t *testing.T) { var counts uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) r.Equal(uint64(10), counts) + counts = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM system.parts WHERE database='default' AND table='table_with_projection' AND has(projections,'x')")) + r.Equal(uint64(10), counts) + err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY") r.NoError(err) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full")) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) } func TestCheckSystemPartsColumns(t *testing.T) { From 8f847a3c207e1af2e5380d5af6a9cfaa6a10fae5 Mon Sep 17 
00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 11:33:18 +0400 Subject: [PATCH 32/80] fix TestProjections --- test/integration/integration_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 8a477c0a..b5cb1665 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1346,9 +1346,11 @@ func TestProjections(t *testing.T) { var counts uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) r.Equal(uint64(10), counts) - counts = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM system.parts WHERE database='default' AND table='table_with_projection' AND has(projections,'x')")) - r.Equal(uint64(10), counts) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") >= 0 { + counts = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM system.parts WHERE database='default' AND table='table_with_projection' AND has(projections,'x')")) + r.Equal(uint64(10), counts) + } err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY") r.NoError(err) From 0cc21b2d02c0dac5b85dbe182b71f7958969d0ca Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 12:41:12 +0400 Subject: [PATCH 33/80] fix tests, will skip required_backup/metadata/db/table.json from resumable state --- pkg/backup/download.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 17a0efd2..f54247b1 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -195,7 +195,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ idx := i tableTitle := t metadataGroup.Go(func() error { - downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, metadataLogger, tableTitle, schemaOnly, partitions) + downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, metadataLogger, tableTitle, schemaOnly, partitions, b.resume) if err != nil { return err } @@ -413,11 +413,11 @@ func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupN return tm, nil } // we always download full metadata in this case without filter by partitions - tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, log.WithFields(apexLog.Fields{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}), tableTitle, false, nil) + tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, log.WithFields(apexLog.Fields{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}), tableTitle, false, nil, false) return tm, err } -func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, log *apexLog.Entry, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { +func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, log *apexLog.Entry, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string, resume bool) (*metadata.TableMetadata, uint64, error) { start := time.Now() size := uint64(0) metadataFiles := map[string]string{} @@ -430,7 +430,7 @@ func (b 
*Backuper) downloadTableMetadata(ctx context.Context, backupName string, } var tableMetadata metadata.TableMetadata for remoteMetadataFile, localMetadataFile := range metadataFiles { - if b.resume { + if resume { isProcessed, processedSize := b.resumableState.IsAlreadyProcessed(localMetadataFile) if isProcessed && strings.HasSuffix(localMetadataFile, ".json") { tmBody, err := os.ReadFile(localMetadataFile) @@ -502,7 +502,7 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, size += jsonSize tableMetadata.LocalFile = localMetadataFile } - if b.resume { + if resume { b.resumableState.AppendToState(localMetadataFile, written) } } From b61bcbd3414309de3604e29a0b36aac3d2f8441f Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 16:21:22 +0400 Subject: [PATCH 34/80] fix tests, add `partial required backup %s deleted log` and properly handle already downloaded parts with required flag --- pkg/backup/download.go | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/pkg/backup/download.go b/pkg/backup/download.go index f54247b1..ee964617 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -294,6 +294,8 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if localBackup.BackupName != remoteBackup.BackupName && localBackup.DataSize+localBackup.CompressedSize+localBackup.MetadataSize == 0 { if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, disks); err != nil { return fmt.Errorf("downloadWithDiff -> RemoveBackupLocal cleaning error: %v", err) + } else { + b.log.Infof("partial required backup %s deleted", localBackup.BackupName) } } } @@ -727,6 +729,17 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. return fmt.Errorf("%s stat return error: %v", existsPath, err) } if err != nil && os.IsNotExist(err) { + //if existPath already processed then expect non empty newPath + if b.resume && b.resumableState.IsAlreadyProcessedBool(existsPath) { + if newPathDirList, newPathDirErr := os.ReadDir(newPath); newPathDirErr != nil { + newPathDirErr = fmt.Errorf("os.ReadDir(%s) error: %v", newPath, newPathDirErr) + log.Error(newPathDirErr.Error()) + return newPathDirErr + } else if len(newPathDirList) == 0 { + return fmt.Errorf("os.ReadDir(%s) expect return non empty list", newPath) + } + continue + } partForDownload := part diskForDownload := disk if !diskExists { @@ -751,11 +764,11 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. return fmt.Errorf("after downloadDiffRemoteFile %s exists but is not directory", downloadedPartPath) } if err = b.makePartHardlinks(downloadedPartPath, existsPath); err != nil { - return fmt.Errorf("can't to add link to exists part %s -> %s error: %v", downloadedPartPath, existsPath, err) + return fmt.Errorf("can't to add link to rebalanced part %s -> %s error: %v", downloadedPartPath, existsPath, err) } } if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("after downloadDiffRemoteFile %s stat return error: %v", downloadedPartPath, err) + return fmt.Errorf("after downloadDiffRemoteFile os.Stat(%s) return error: %v", downloadedPartPath, err) } } atomic.AddUint32(&downloadedDiffParts, 1) @@ -786,6 +799,9 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. 
func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLock *sync.Mutex, diffRemoteFilesCache map[string]*sync.Mutex, tableRemoteFile string, tableLocalDir string) error { log := b.log.WithField("logger", "downloadDiffRemoteFile") + if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { + return nil + } diffRemoteFilesLock.Lock() namedLock, isCached := diffRemoteFilesCache[tableRemoteFile] if isCached { @@ -817,6 +833,9 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo } } namedLock.Unlock() + if b.resume { + b.resumableState.AppendToState(tableRemoteFile, 0) + } log.Debugf("finish download from %s", tableRemoteFile) } return nil @@ -1022,7 +1041,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { existsFInfo, existsStatErr := os.Stat(existsF) newFInfo, newStatErr := os.Stat(newF) if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) { - log.Warnf("Link %s -> %s error: %v", newF, existsF, err) + log.Warnf("Link %s -> %s error: %v, existsStatErr: %v newStatErr: %v", existsF, newF, err, existsStatErr, newStatErr) return err } } From fc415910242b7f5a1f9420e35a403b19adf5e77d Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 16:40:17 +0400 Subject: [PATCH 35/80] fix tests --- test/integration/integration_test.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index b5cb1665..90eff1a4 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2228,13 +2228,9 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st // test end log.Info("Clean after finish") - // CUSTOM and EMBEDDED download increment doesn't download full - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - fullCleanup(t, r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) - fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) - } else { - fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true, backupConfig) - } + // during download increment, partially downloaded full will clean + fullCleanup(t, r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) + fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) replaceStorageDiskNameForReBalance(r, ch, remoteStorageType, true) checkObjectStorageIsEmpty(t, r, remoteStorageType) } From ded3d4b218165e3c47bda52aeed973a91465efcc Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 26 Mar 2024 17:08:50 +0400 Subject: [PATCH 36/80] add description for incremental native backup, fix https://github.com/Altinity/clickhouse-backup/issues/735 --- ChangeLog.md | 3 ++- pkg/backup/create.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ChangeLog.md b/ChangeLog.md index 20f18da4..8c95332b 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -5,7 +5,8 @@ IMPROVEMENTS - `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` -- `--data` for `restore` with `use_embedded_backup_restore: true` will use `allow_non_empty_tables=true` to allow fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) - added 
`--diff-from-remote` parameter for `create` command, will copy only new data parts object disk data, also allows to download properly object disk data from required backup during `restore`, fix [865](https://github.com/Altinity/clickhouse-backup/issues/865) - +- added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) + BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 - changed minimal part size for multipart upload in CopyObject from `5Mb` to `10Mb` diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 93645a43..8eec9a29 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -496,6 +496,7 @@ func (b *Backuper) generateEmbeddedBackupSQL(ctx context.Context, backupName str if b.cfg.ClickHouse.EmbeddedBackupThreads > 0 { backupSettings = append(backupSettings, fmt.Sprintf("backup_threads=%d", b.cfg.ClickHouse.EmbeddedBackupThreads)) } + // incremental native backup https://github.com/Altinity/clickhouse-backup/issues/735 if baseBackup != "" { backupSettings = append(backupSettings, fmt.Sprintf("base_backup='%s'", baseBackup)) } From d39b3e5b6baa7a472cea31c470e561005b82f58a Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 04:07:26 +0400 Subject: [PATCH 37/80] fix TestIntegrationCustom* failures --- pkg/backup/restore.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 9af61615..6b8f5f1c 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -161,9 +161,11 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } isObjectDiskPresents := false - for _, d := range disks { - if isObjectDiskPresents = b.isDiskTypeObject(d.Type); isObjectDiskPresents { - break + if b.cfg.General.RemoteStorage != "custom" { + for _, d := range disks { + if isObjectDiskPresents = b.isDiskTypeObject(d.Type); isObjectDiskPresents { + break + } } } if (b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "") || isObjectDiskPresents { From e6e50f0496b9dad45e42f9fa6a19c2a7f2805e27 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 04:54:40 +0400 Subject: [PATCH 38/80] switched to golang 1.22 --- .github/workflows/build.yaml | 4 ++-- .github/workflows/release.yaml | 2 +- ChangeLog.md | 1 + Dockerfile | 6 +++--- Vagrantfile | 2 +- go.mod | 2 +- test/integration/install_delve.sh | 6 +++--- 7 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4f3c0e05..677cd482 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -16,7 +16,7 @@ jobs: strategy: matrix: golang-version: - - "1.21" + - "1.22" steps: - name: Checkout project uses: actions/checkout@v4 @@ -211,7 +211,7 @@ jobs: strategy: matrix: golang-version: - - "1.21" + - "1.22" clickhouse: - '1.1.54394' - '19.17' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f535944b..46d21f9d 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: golang-version: - - "1.21" + - "1.22" steps: - name: Checkout project diff --git a/ChangeLog.md b/ChangeLog.md index 8c95332b..fd01c90d 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -6,6 +6,7 @@ IMPROVEMENTS -- `--data` for `restore` with `use_embedded_backup_restore: true` will use 
`allow_non_empty_tables=true` to allow fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) - added `--diff-from-remote` parameter for `create` command, will copy only new data parts object disk data, also allows to download properly object disk data from required backup during `restore`, fix [865](https://github.com/Altinity/clickhouse-backup/issues/865) - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) +- switched to golang 1.22 BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 diff --git a/Dockerfile b/Dockerfile index 1a37513c..85fbca81 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,16 +15,16 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \ echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \ echo "deb-src https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" >> /etc/apt/sources.list.d/golang.list && \ ( apt-get update || true ) && \ - apt-get install -y --no-install-recommends libc-dev golang-1.21 make git gcc musl-dev musl-tools && \ + apt-get install -y --no-install-recommends libc-dev golang-1.22 make git gcc musl-dev musl-tools && \ wget -q -P /root/ https://musl.cc/aarch64-linux-musl-cross.tgz && \ tar -xvf /root/aarch64-linux-musl-cross.tgz -C /root/ && \ mkdir -p /root/go/ -RUN ln -nsfv /usr/lib/go-1.21/bin/go /usr/bin/go +RUN ln -nsfv /usr/lib/go-1.22/bin/go /usr/bin/go VOLUME /root/.cache/go ENV GOCACHE=/root/.cache/go ENV GOPATH=/root/go/ -ENV GOROOT=/usr/lib/go-1.21/ +ENV GOROOT=/usr/lib/go-1.22/ RUN go env WORKDIR /src/ # cache modules when go.mod go.sum changed diff --git a/Vagrantfile b/Vagrantfile index 12eae47a..6e40276d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -101,7 +101,7 @@ Vagrant.configure(2) do |config| apt-get install --no-install-recommends -y clickhouse-client clickhouse-server # golang - export GOLANG_VERSION=1.21 + export GOLANG_VERSION=1.22 apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E add-apt-repository ppa:longsleep/golang-backports apt-get install --no-install-recommends -y golang-${GOLANG_VERSION} diff --git a/go.mod b/go.mod index 8e9bfed9..172cba24 100644 --- a/go.mod +++ b/go.mod @@ -137,4 +137,4 @@ require ( google.golang.org/protobuf v1.33.0 // indirect ) -go 1.21 +go 1.22 diff --git a/test/integration/install_delve.sh b/test/integration/install_delve.sh index 16f3a115..8a2263e0 100755 --- a/test/integration/install_delve.sh +++ b/test/integration/install_delve.sh @@ -4,16 +4,16 @@ apt-get update && apt-get install -y software-properties-common apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E add-apt-repository -y ppa:longsleep/golang-backports apt-get update -apt-get install -y golang-1.21 +apt-get install -y golang-1.22 mkdir -p ~/go/ export GOPATH=~/go/ grep -q -F 'export GOPATH=$GOPATH' ~/.bashrc || echo "export GOPATH=$GOPATH" >> ~/.bashrc grep -q -F 'export GOPATH=$GOPATH' /root/.bashrc || echo "export GOPATH=$GOPATH" >> /root/.bashrc -export GOROOT=/usr/lib/go-1.21/ +export GOROOT=/usr/lib/go-1.22/ grep -q -F 'export GOROOT=$GOROOT' ~/.bashrc || echo "export GOROOT=$GOROOT" >> ~/.bashrc grep -q -F 'export GOROOT=$GOROOT' /root/.bashrc || echo "export GOROOT=$GOROOT" >> /root/.bashrc -ln -nsfv 
/usr/lib/go-1.21/bin/go /usr/bin/go +ln -nsfv /usr/lib/go-1.22/bin/go /usr/bin/go CGO_ENABLED=0 GO111MODULE=on go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest From 5973727dfc88a9e968db272aa681022a499a2437 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 05:36:07 +0400 Subject: [PATCH 39/80] switched to golang 1.22, second try --- .github/workflows/build.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 677cd482..86d5bb17 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -49,7 +49,7 @@ jobs: - name: Build clickhouse-backup binary id: make-race env: - GOROOT: ${{ env.GOROOT_1_20_X64 }} + GOROOT: ${{ env.GOROOT_1_22_X64 }} run: | make build/linux/amd64/clickhouse-backup build/linux/arm64/clickhouse-backup make build/linux/amd64/clickhouse-backup-fips build/linux/arm64/clickhouse-backup-fips diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 46d21f9d..a9a3ba15 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -55,7 +55,7 @@ jobs: - name: Prepare binaries id: make env: - GOROOT: ${{ env.GOROOT_1_20_X64 }} + GOROOT: ${{ env.GOROOT_1_22_X64 }} run: | make build build-fips config test #make build-fips-darwin From 07cbfd3a01c4578ecf5ede17e571dfe9fffaacec Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 05:38:17 +0400 Subject: [PATCH 40/80] switched to golang 1.22 --- .github/workflows/build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 86d5bb17..73a4ca0e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -183,7 +183,8 @@ jobs: - name: Format testflows coverage run: | sudo chmod -Rv a+rw test/testflows/_coverage_/ - ls -la test/testflows/_coverage_ + ls -la test/testflows/_coverage_ + go env go tool covdata textfmt -i test/testflows/_coverage_/ -o test/testflows/_coverage_/coverage.out - name: Report testflows coverage uses: coverallsapp/github-action@v2 @@ -192,7 +193,7 @@ jobs: parallel: true format: golang flag-name: testflows-${{ matrix.clickhouse }} -# todo wait when resolve https://github.com/actions/upload-artifact/issues/270 and uncomment + # todo possible failures https://github.com/actions/upload-artifact/issues/270 - name: Upload testflows logs uses: actions/upload-artifact@v4 with: From f226b22dc04efa8cf3db335393c1f6be2c6211f4 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 07:50:36 +0400 Subject: [PATCH 41/80] debug failed format testflows coverage --- .github/workflows/build.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 73a4ca0e..da35a0b9 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -181,6 +181,8 @@ jobs: tfs --debug --no-colors report results -a "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/" ./test/testflows/raw.log - --confidential --copyright "Altinity LTD" --logo ./test/testflows/altinity.png | ~/venv/qa/bin/tfs --debug --no-colors document convert > ./test/testflows/report.html sudo chmod -Rv +rx test/testflows/clickhouse_backup/_instances - name: Format testflows coverage + env: + GOROOT: ${{ env.GOROOT_1_22_X64 }} run: | sudo chmod -Rv a+rw test/testflows/_coverage_/ ls -la test/testflows/_coverage_ @@ -257,6 +259,7 @@ 
jobs: - name: Running integration tests env: + GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # just options for advanced logging # RUN_TESTS: "TestFIPS" @@ -304,6 +307,8 @@ jobs: docker-compose -f test/integration/${COMPOSE_FILE} ps -a go test -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage + env: + GOROOT: ${{ env.GOROOT_1_22_X64 }} run: | sudo chmod -Rv a+rw test/integration/_coverage_/ ls -la test/integration/_coverage_ From bc9f1c48f0eadf98cbc6220a400f0bfed5d8ad51 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 08:12:30 +0400 Subject: [PATCH 42/80] fix tests --- .github/workflows/build.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index da35a0b9..71557fbe 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -114,12 +114,20 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: + golang-version: + - "1.22" clickhouse: - '22.3' - '22.8' - '23.3' - '23.8' steps: + - name: Setup golang + id: setup-go + uses: actions/setup-go@v5 + with: + go-version: '^${{ matrix.golang-version }}' + - name: Checkout project uses: actions/checkout@v4 From ee227f5f4d75ec83d50926b913b7fbaf0a282bce Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 09:06:47 +0400 Subject: [PATCH 43/80] debug slow perfomance TestIntegrationGCS --- .github/workflows/build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 71557fbe..ddbc7bb7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -270,8 +270,9 @@ jobs: GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # just options for advanced logging - # RUN_TESTS: "TestFIPS" - # LOG_LEVEL: "debug" + RUN_TESTS: "TestIntegrationGCS" + LOG_LEVEL: "debug" + GCS_DEBUG: "true" # SFTP_DEBUG: "true" # AZBLOB_DEBUG: "true" # FTP_DEBUG: "true" From 944999494d458ae3299592d57fc4bc3070ec8cd9 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 10:31:04 +0400 Subject: [PATCH 44/80] debug slow performance TestIntegrationGCS --- .github/workflows/build.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ddbc7bb7..aba6d65f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,10 +269,10 @@ jobs: env: GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} - # just options for advanced logging - RUN_TESTS: "TestIntegrationGCS" - LOG_LEVEL: "debug" - GCS_DEBUG: "true" + # options for advanced debug CI/CD + # RUN_TESTS: "TestIntegrationGCS" + # LOG_LEVEL: "debug" + # GCS_DEBUG: "true" # SFTP_DEBUG: "true" # AZBLOB_DEBUG: "true" # FTP_DEBUG: "true" From 3bbc16cf9f6606c9ce7461e6f9dcf61aaa976c92 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 12:35:44 +0400 Subject: [PATCH 45/80] add duration logging for restore --- pkg/backup/restore.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 6b8f5f1c..e1a3b42a 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -45,6 +45,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } ctx, cancel = context.WithCancel(ctx) defer cancel() + startRestore := time.Now() backupName = 
utils.CleanBackupNameRE.ReplaceAllString(backupName, "") if err := b.prepareRestoreDatabaseMapping(databaseMapping); err != nil { return err @@ -222,7 +223,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } } - log.Info("done") + log.WithField("duration", utils.HumanizeDuration(time.Since(startRestore))).Info("done") return nil } @@ -541,6 +542,7 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesF "backup": backupName, "operation": "restore_schema", }) + startRestoreSchema := time.Now() if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { return dropErr @@ -554,6 +556,7 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesF if restoreErr != nil { return restoreErr } + log.WithField("duration", utils.HumanizeDuration(time.Since(startRestoreSchema))).Info("done") return nil } @@ -820,7 +823,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci // RestoreData - restore data for tables matched by tablePattern from backupName func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { - startRestore := time.Now() + startRestoreData := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, "operation": "restore_data", @@ -871,7 +874,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMet if err != nil { return err } - log.WithField("duration", utils.HumanizeDuration(time.Since(startRestore))).Info("done") + log.WithField("duration", utils.HumanizeDuration(time.Since(startRestoreData))).Info("done") return nil } From 5771984b46aa4b7d6590d25d4897642aff3a3c8c Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 17:35:16 +0400 Subject: [PATCH 46/80] fixed behavior for upload / download when .inner. table missing for MATERIALIZED VIEW by table pattern, fix https://github.com/Altinity/clickhouse-backup/issues/765 --- ChangeLog.md | 1 + pkg/backup/download.go | 44 ++++++++++++++++++++++++++++ pkg/backup/table_pattern.go | 1 + test/integration/integration_test.go | 19 ++++++++++-- 4 files changed, 63 insertions(+), 2 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index fd01c90d..ed565224 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -15,6 +15,7 @@ BUG FIXES - execute `ALTER TABLE ... DROP PARTITION` instead of `DROP TABLE` for `restore` and `restore_remote` with parameters `--data --partitions=...`, fix [756](https://github.com/Altinity/clickhouse-backup/issues/756) - fix wrong behavior for `freeze_by_part` + `freeze_by_part_where`, fix [855](https://github.com/Altinity/clickhouse-backup/issues/855) - apply `CLICKHOUSE_SKIP_TABLES_ENGINES` during `create` command +- fixed behavior for upload / download when .inner. table missing for MATERIALIZED VIEW by table pattern, fix [765](https://github.com/Altinity/clickhouse-backup/issues/765) # v2.4.35 IMPROVEMENTS diff --git a/pkg/backup/download.go b/pkg/backup/download.go index ee964617..dbf3bd98 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -207,6 +207,13 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if err := metadataGroup.Wait(); err != nil { return fmt.Errorf("one of Download Metadata go-routine return error: %v", err) } + // download, missed .inner. 
tables, https://github.com/Altinity/clickhouse-backup/issues/765 + var missedInnerTableErr error + tableMetadataAfterDownload, tablesForDownload, metadataSize, missedInnerTableErr = b.downloadMissedInnerTablesMetadata(ctx, backupName, metadataSize, tablesForDownload, tableMetadataAfterDownload, disks, schemaOnly, partitions, log) + if missedInnerTableErr != nil { + return missedInnerTableErr + } + if !schemaOnly { if reBalanceErr := b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownload, disks, remoteBackup, log); reBalanceErr != nil { return reBalanceErr @@ -515,6 +522,43 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, return &tableMetadata, size, nil } +// downloadMissedInnerTablesMetadata - download, missed .inner. tables if materialized view query not contains `TO db.table` clause, https://github.com/Altinity/clickhouse-backup/issues/765 +// @todo think about parallel download if sequentially will slow +func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backupName string, metadataSize uint64, tablesForDownload []metadata.TableTitle, tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, schemaOnly bool, partitions []string, log *apexLog.Entry) ([]*metadata.TableMetadata, []metadata.TableTitle, uint64, error) { + for _, t := range tableMetadataAfterDownload { + if strings.HasPrefix(t.Query, "ATTACH MATERIALIZED") || strings.HasPrefix(t.Query, "CREATE MATERIALIZED") { + if strings.Contains(t.Query, " TO ") && !strings.Contains(t.Query, " TO INNER UUID") { + continue + } + var innerTableName string + if matches := uuidRE.FindStringSubmatch(t.Query); len(matches) > 0 { + innerTableName = fmt.Sprintf(".inner_id.%s", matches[1]) + } else { + innerTableName = fmt.Sprintf(".inner.%s", t.Table) + } + innerTableExists := false + for _, existsTable := range tablesForDownload { + if existsTable.Table == innerTableName && existsTable.Database == t.Database { + innerTableExists = true + break + } + } + if !innerTableExists { + innerTableTitle := metadata.TableTitle{Database: t.Database, Table: innerTableName} + metadataLogger := log.WithField("missed_inner_metadata", fmt.Sprintf("%s.%s", innerTableTitle.Database, innerTableTitle.Table)) + innerTableMetadata, size, err := b.downloadTableMetadata(ctx, backupName, disks, metadataLogger, innerTableTitle, schemaOnly, partitions, b.resume) + if err != nil { + return tableMetadataAfterDownload, tablesForDownload, metadataSize, err + } + metadataSize += size + tablesForDownload = append(tablesForDownload, innerTableTitle) + tableMetadataAfterDownload = append(tableMetadataAfterDownload, innerTableMetadata) + } + } + } + return tableMetadataAfterDownload, tablesForDownload, metadataSize, nil +} + func (b *Backuper) downloadRBACData(ctx context.Context, remoteBackup storage.Backup) (uint64, error) { return b.downloadBackupRelatedDir(ctx, remoteBackup, "access") } diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index 5986ae2f..42877de3 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -251,6 +251,7 @@ func (b *Backuper) enrichTablePatternsByInnerDependencies(metadataPath string, t innerTableFile = path.Join(innerTableFile, common.TablePathEncode(fmt.Sprintf(".inner.%s", table))) innerTableName += fmt.Sprintf(".inner.%s", table) } + // https://github.com/Altinity/clickhouse-backup/issues/765, .inner. table could be dropped manually, .inner. 
table is required for ATTACH if _, err := os.Stat(path.Join(metadataPath, innerTableFile+".json")); err != nil { return err } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 90eff1a4..a64ad31f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1709,7 +1709,8 @@ func TestInnerTablesMaterializedView(t *testing.T) { ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") ch.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) dropSQL := "DROP DATABASE test_mv" isAtomic, err := ch.chbackend.IsAtomic("test_mv") r.NoError(err) @@ -1717,14 +1718,28 @@ func TestInnerTablesMaterializedView(t *testing.T) { dropSQL += " NO DELAY" } ch.queryWithNoError(r, dropSQL) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) var rowCnt uint64 + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) r.Equal(uint64(100), rowCnt) + r.NoError(ch.dropDatabase("test_mv")) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) + r.Equal(uint64(100), rowCnt) + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) + r.Equal(uint64(100), rowCnt) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv")) } func TestFIPS(t *testing.T) { From 00b82639449ee81200f09ee94ff4cca4bcab32de Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 18:46:16 +0400 Subject: [PATCH 47/80] fix corner cases for TestSkipTablesAndSkipTableEngines, cause some tables could be skipped --- pkg/backup/download.go | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/pkg/backup/download.go b/pkg/backup/download.go index dbf3bd98..80399a07 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -526,6 +526,9 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, // @todo think about parallel download if sequentially will slow func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backupName string, metadataSize uint64, tablesForDownload []metadata.TableTitle, tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, schemaOnly bool, partitions []string, log *apexLog.Entry) ([]*metadata.TableMetadata, []metadata.TableTitle, uint64, error) { for _, t := range tableMetadataAfterDownload { + if t == nil { + continue + } if strings.HasPrefix(t.Query, "ATTACH MATERIALIZED") || strings.HasPrefix(t.Query, "CREATE MATERIALIZED") { if strings.Contains(t.Query, " TO ") && !strings.Contains(t.Query, " TO INNER UUID") { continue From 3c5001cc6511d3f0ca5509d35e0f2941da277f03 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 18:50:24 +0400 Subject: [PATCH 48/80] add Markdown Headers for API descriptions --- ReadMe.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ReadMe.md b/ReadMe.md index 9ec68d9c..0ad9ce8c 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -634,35 +634,35 @@ That can lead to data corruption. Use the `clickhouse-backup server` command to run as a REST API server. In general, the API attempts to mirror the CLI commands. -> **GET /** +### GET / List all current applicable HTTP routes -> **POST /** +### POST / -> **POST /restart** +### POST /restart Restart HTTP server, close all current connections, close listen socket, open listen socket again, all background go-routines breaks with contexts -> **GET /backup/kill** +### GET /backup/kill Kill selected command from `GET /backup/actions` command list, kill process should be near immediate, but some go-routines (upload one data part) could continue to run. - Optional query argument `command` may contain the command name to kill, or if it is omitted then kill the first "in progress" command. -> **GET /backup/tables** +### GET /backup/tables Print list of tables: `curl -s localhost:7171/backup/tables | jq .`, exclude pattern matched tables from `skip_tables` configuration parameters - Optional query argument `table` works the same as the `--table value` CLI argument. -> **GET /backup/tables/all** +### GET /backup/tables/all Print list of tables: `curl -s localhost:7171/backup/tables/all | jq .`, ignore `skip_tables` configuration parameters. - Optional query argument `table` works the same as the `--table value` CLI argument. -> **POST /backup/create** +### POST /backup/create Create new backup: `curl -s localhost:7171/backup/create -X POST | jq .` @@ -677,7 +677,7 @@ Create new backup: `curl -s localhost:7171/backup/create -X POST | jq .` Note: this operation is asynchronous, so the API will return once the operation has started. -> **POST /backup/watch** +### POST /backup/watch Run background watch process and create full+incremental backups sequence: `curl -s localhost:7171/backup/watch -X POST | jq .` You can't run watch twice with the same parameters even when `allow_parallel: true` @@ -694,16 +694,16 @@ You can't run watch twice with the same parameters even when `allow_parallel: tr Note: this operation is asynchronous and can only be stopped with `kill -s SIGHUP $(pgrep -f clickhouse-backup)` or call `/restart`, `/backup/kill`. 
The API will return immediately once the operation has started. -> **POST /backup/clean** +### POST /backup/clean Clean the `shadow` folders using all available paths from `system.disks` -> **POST /backup/clean/remote_broken** +### POST /backup/clean/remote_broken Remove Note: this operation is sync, and could take a lot of time, increase http timeouts during call -> **POST /backup/upload** +### POST /backup/upload Upload backup to remote storage: `curl -s localhost:7171/backup/upload/ -X POST | jq .` @@ -717,7 +717,7 @@ Upload backup to remote storage: `curl -s localhost:7171/backup/upload/ **GET /backup/list/{where}** +### GET /backup/list/{where} Print a list of backups: `curl -s localhost:7171/backup/list | jq .` Print a list of only local backups: `curl -s localhost:7171/backup/list/local | jq .` @@ -726,7 +726,7 @@ Print a list of only remote backups: `curl -s localhost:7171/backup/list/remote Note: The `Size` field will not be set for the local backups that have just been created or are in progress. Note: The `Size` field will not be set for the remote backups with upload status in progress. -> **POST /backup/download** +### POST /backup/download Download backup from remote storage: `curl -s localhost:7171/backup/download/ -X POST | jq .` @@ -738,7 +738,7 @@ Download backup from remote storage: `curl -s localhost:7171/backup/download/ **POST /backup/restore** +### POST /backup/restore Create schema and restore data from backup: `curl -s localhost:7171/backup/restore/ -X POST | jq .` @@ -753,21 +753,21 @@ Create schema and restore data from backup: `curl -s localhost:7171/backup/resto - Optional query argument `restore_database_mapping` works the same as the `--restore-database-mapping` CLI argument. - Optional query argument `callback` allow pass callback URL which will call with POST with `application/json` with payload `{"status":"error|success","error":"not empty when error happens"}`. 
-> **POST /backup/delete** +### POST /backup/delete Delete specific remote backup: `curl -s localhost:7171/backup/delete/remote/ -X POST | jq .` Delete specific local backup: `curl -s localhost:7171/backup/delete/local/ -X POST | jq .` -> **GET /backup/status** +### GET /backup/status Display list of currently running asynchronous operations: `curl -s localhost:7171/backup/status | jq .` -> **POST /backup/actions** +### POST /backup/actions Execute multiple backup actions: `curl -X POST -d '{"command":"create test_backup"}' -s localhost:7171/backup/actions` -> **GET /backup/actions** +### GET /backup/actions Display a list of all operations from start of API server: `curl -s localhost:7171/backup/actions | jq .` From 5950e57310e781687e89c2d7a569ed6216c93de3 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 27 Mar 2024 19:53:31 +0400 Subject: [PATCH 49/80] added `--delete-source` parameter for `upload` and `create_remote` commands to explicitly delete local backup after upload, fix https://github.com/Altinity/clickhouse-backup/issues/777 --- ChangeLog.md | 1 + ReadMe.md | 1 + cmd/clickhouse-backup/main.go | 14 ++++++++++++-- pkg/backup/create_remote.go | 4 ++-- pkg/backup/upload.go | 12 +++++++++--- pkg/backup/watch.go | 4 ++-- pkg/server/server.go | 8 +++++++- test/integration/integration_test.go | 5 ++--- 8 files changed, 36 insertions(+), 13 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index ed565224..fa548c4f 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,5 +1,6 @@ # v2.5.0 (not released yet) IMPROVEMENTS +- added `--delete-source` parameter for `upload` and `create_remote` commands to explicitly delete local backup after upload, fix [777](https://github.com/Altinity/clickhouse-backup/issues/777) - added support for `--env ENV_NAME=value` cli parameter for allow dynamically override any config parameter, fix [821](https://github.com/Altinity/clickhouse-backup/issues/821) - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) - `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` diff --git a/ReadMe.md b/ReadMe.md index 0ad9ce8c..593de5fe 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -707,6 +707,7 @@ Note: this operation is sync, and could take a lot of time, increase http timeou Upload backup to remote storage: `curl -s localhost:7171/backup/upload/ -X POST | jq .` +- Optional query argument `delete-source` works the same as the `--delete-source` CLI argument. - Optional query argument `diff-from` works the same as the `--diff-from` CLI argument. - Optional query argument `diff-from-remote` works the same as the `--diff-from-remote` CLI argument. - Optional query argument `table` works the same as the `--table value` CLI argument. 
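A minimal usage sketch for the new `delete-source` argument (the backup name is a placeholder; as the handler change later in this patch shows, only the presence of the parameter matters, not its value):

```bash
# Hypothetical example: upload local backup "my_backup" to remote storage
# and explicitly delete the local copy after a successful upload.
curl -s -X POST "localhost:7171/backup/upload/my_backup?delete-source=true" | jq .
```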
diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 41ea7999..ea4f02aa 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -158,7 +158,7 @@ func main() { Description: "Create and upload", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateToRemote(c.Args().First(), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateToRemote(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -221,6 +221,11 @@ func main() { Hidden: false, Usage: "Skip check system.parts_columns to disallow backup inconsistent column types for data parts", }, + cli.BoolFlag{ + Name: "delete, delete-source, delete-local", + Hidden: false, + Usage: "explicitly delete local backup after upload", + }, ), }, { @@ -229,7 +234,7 @@ func main() { UsageText: "clickhouse-backup upload [-t, --tables=.
] [--partitions=] [-s, --schema] [--diff-from=] [--diff-from-remote=] [--resumable] ", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Upload(c.Args().First(), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("resume"), c.Int("command-id")) + return b.Upload(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("resume"), c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -267,6 +272,11 @@ func main() { Hidden: false, Usage: "Save intermediate upload state and resume upload if backup exists on remote storage, ignored with 'remote_storage: custom' or 'use_embedded_backup_restore: true'", }, + cli.BoolFlag{ + Name: "delete, delete-source, delete-local", + Hidden: false, + Usage: "explicitly delete local backup after upload", + }, ), }, { diff --git a/pkg/backup/create_remote.go b/pkg/backup/create_remote.go index 40debb62..8c9806f6 100644 --- a/pkg/backup/create_remote.go +++ b/pkg/backup/create_remote.go @@ -5,7 +5,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/status" ) -func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume bool, version string, commandId int) error { +func (b *Backuper) CreateToRemote(backupName string, deleteSource bool, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -18,7 +18,7 @@ func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePat if err := b.CreateBackup(backupName, diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { return err } - if err := b.Upload(backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { + if err := b.Upload(backupName, deleteSource, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { return err } diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 95ff0735..d0f5f3ef 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -32,7 +32,7 @@ import ( "github.com/yargevad/filepathx" ) -func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, resume bool, commandId int) error { +func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, resume bool, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -232,15 +232,21 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str WithField("size", utils.FormatBytes(uint64(compressedDataSize)+uint64(metadataSize)+uint64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)). 
Info("done") - // Clean + // Remote old backup retention if err = b.RemoveOldBackupsRemote(ctx); err != nil { return fmt.Errorf("can't remove old backups on remote storage: %v", err) } - // fix https://github.com/Altinity/clickhouse-backup/issues/834 + // Local old backup retention, fix https://github.com/Altinity/clickhouse-backup/issues/834 if err = b.RemoveOldBackupsLocal(ctx, false, nil); err != nil { return fmt.Errorf("can't remove old local backups: %v", err) } + // explicitly delete local backup after successful upload, fix https://github.com/Altinity/clickhouse-backup/issues/777 + if b.cfg.General.BackupsToKeepLocal >= 0 && deleteSource { + if err = b.RemoveBackupLocal(ctx, backupName, disks); err != nil { + return fmt.Errorf("can't explicitly delete local source backup: %v", err) + } + } return nil } diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index ae4a2307..18eeeb39 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -126,14 +126,14 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } if metrics != nil { createRemoteErr, createRemoteErrCount = metrics.ExecuteWithMetrics("create_remote", createRemoteErrCount, func() error { - return b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) + return b.CreateToRemote(backupName, false, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) }) deleteLocalErr, deleteLocalErrCount = metrics.ExecuteWithMetrics("delete", deleteLocalErrCount, func() error { return b.RemoveBackupLocal(ctx, backupName, nil) }) } else { - createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) + createRemoteErr = b.CreateToRemote(backupName, false, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { log.Errorf("create_remote %s return error: %v", backupName, createRemoteErr) createRemoteErrCount += 1 diff --git a/pkg/server/server.go b/pkg/server/server.go index ff241d9a..2c8fa77a 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1054,6 +1054,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) } vars := mux.Vars(r) query := r.URL.Query() + deleteSource := false diffFrom := "" diffFromRemote := "" name := utils.CleanBackupNameRE.ReplaceAllString(vars["name"], "") @@ -1063,6 +1064,11 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) resume := false fullCommand := "upload" + if _, exist := query["delete-source"]; exist { + deleteSource = true + fullCommand = fmt.Sprintf("%s --deleteSource", fullCommand) + } + if df, exist := query["diff-from"]; exist { diffFrom = df[0] fullCommand = fmt.Sprintf("%s --diff-from=\"%s\"", fullCommand, diffFrom) @@ -1101,7 +1107,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) commandId, _ := status.Current.Start(fullCommand) err, _ := api.metrics.ExecuteWithMetrics("upload", 0, func() error { b := backup.NewBackuper(cfg) - return b.Upload(name, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) + return b.Upload(name, deleteSource, diffFrom, 
diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) }) if err != nil { api.log.Errorf("Upload error: %v", err) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a64ad31f..f6aff7cf 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1727,9 +1727,8 @@ func TestInnerTablesMaterializedView(t *testing.T) { r.Equal(uint64(100), rowCnt) r.NoError(ch.dropDatabase("test_mv")) - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) + // https://github.com/Altinity/clickhouse-backup/issues/777 + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--delete-source", "--tables=test_mv.mv_with*,test_mv.dst*")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) From f863b3f87cad56b95b2fd0b5ba945d23a83bd4a7 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 28 Mar 2024 00:32:25 +0400 Subject: [PATCH 50/80] added `clickhouse-server:24.3` to CI/CD pipelines --- .github/workflows/build.yaml | 2 ++ ChangeLog.md | 1 + test/integration/run.sh | 2 +- test/testflows/run.sh | 4 ++-- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index aba6d65f..90630ccc 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -121,6 +121,7 @@ jobs: - '22.8' - '23.3' - '23.8' + - '24.3' steps: - name: Setup golang id: setup-go @@ -234,6 +235,7 @@ jobs: - '22.8' - '23.3' - '23.8' + - '24.3' steps: - name: Checkout project uses: actions/checkout@v4 diff --git a/ChangeLog.md b/ChangeLog.md index a138e15f..e4f6a4c5 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -9,6 +9,7 @@ IMPROVEMENTS - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) - added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid - switched to golang 1.22 +- added `clickhouse-server:24.3` to CI/CD pipelines BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 diff --git a/test/integration/run.sh b/test/integration/run.sh index 6288336c..cf96bb5d 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -8,7 +8,7 @@ rm -rf "${CUR_DIR}/_coverage_/*" source "${CUR_DIR}/.env" -export CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION:-23.8} +export CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION:-24.3} if [[ "${CLICKHOUSE_VERSION}" =~ ^2[1-9]+ || "${CLICKHOUSE_VERSION}" == "head" ]]; then export CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:-clickhouse/clickhouse-server} else diff --git a/test/testflows/run.sh b/test/testflows/run.sh index e3208f8f..e37b7ac9 100755 --- a/test/testflows/run.sh +++ b/test/testflows/run.sh @@ -2,7 +2,7 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 
>/dev/null 2>&1 && pwd)" mkdir -p "${CUR_DIR}/_coverage_/" rm -rf "${CUR_DIR}/_coverage_/*" source "${CUR_DIR}/.env" -export CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION:-23.8} +export CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION:-24.3} if [[ "${CLICKHOUSE_VERSION}" =~ ^2[1-9]+ || "${CLICKHOUSE_VERSION}" == "head" ]]; then export CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:-clickhouse/clickhouse-server} else @@ -10,4 +10,4 @@ else fi make clean build-race-docker python3 "${CUR_DIR}/clickhouse_backup/regression.py" --debug --only="${RUN_TESTS:-*}" -go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" \ No newline at end of file +go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" From 594da6db7a3c30ddea7b0d36d55f5b64271f7520 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 28 Mar 2024 15:38:26 +0400 Subject: [PATCH 51/80] remove added `clickhouse-server:24.3` from CI/CD pipelines, wait when resolve https://github.com/ClickHouse/ClickHouse/issues/62018 --- .github/workflows/build.yaml | 6 ++++-- ChangeLog.md | 1 - pkg/backup/download.go | 3 +++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 90630ccc..4e0ccffd 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -121,7 +121,8 @@ jobs: - '22.8' - '23.3' - '23.8' - - '24.3' + # wait when resolve https://github.com/ClickHouse/ClickHouse/issues/62018 + # - '24.3' steps: - name: Setup golang id: setup-go @@ -235,7 +236,8 @@ jobs: - '22.8' - '23.3' - '23.8' - - '24.3' + # wait when resolve https://github.com/ClickHouse/ClickHouse/issues/62018 + # - '24.3' steps: - name: Checkout project uses: actions/checkout@v4 diff --git a/ChangeLog.md b/ChangeLog.md index e4f6a4c5..a138e15f 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -9,7 +9,6 @@ IMPROVEMENTS - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) - added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid - switched to golang 1.22 -- added `clickhouse-server:24.3` to CI/CD pipelines BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 80399a07..426f6258 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -525,6 +525,9 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, // downloadMissedInnerTablesMetadata - download, missed .inner. 
tables if materialized view query not contains `TO db.table` clause, https://github.com/Altinity/clickhouse-backup/issues/765 // @todo think about parallel download if sequentially will slow func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backupName string, metadataSize uint64, tablesForDownload []metadata.TableTitle, tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, schemaOnly bool, partitions []string, log *apexLog.Entry) ([]*metadata.TableMetadata, []metadata.TableTitle, uint64, error) { + if b.isEmbedded { + return tableMetadataAfterDownload, tablesForDownload, metadataSize, nil + } for _, t := range tableMetadataAfterDownload { if t == nil { continue From fc8ab15bba1c7c7a6bbfb83dced97e2839ed97fb Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 29 Mar 2024 07:35:06 +0400 Subject: [PATCH 52/80] added `clickhouse-server:24.3` to CI/CD pipelines, fix https://github.com/ClickHouse/ClickHouse/issues/62018 --- .github/workflows/build.yaml | 6 ++---- ChangeLog.md | 1 + pkg/clickhouse/clickhouse.go | 12 +++++++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4e0ccffd..90630ccc 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -121,8 +121,7 @@ jobs: - '22.8' - '23.3' - '23.8' - # wait when resolve https://github.com/ClickHouse/ClickHouse/issues/62018 - # - '24.3' + - '24.3' steps: - name: Setup golang id: setup-go @@ -236,8 +235,7 @@ jobs: - '22.8' - '23.3' - '23.8' - # wait when resolve https://github.com/ClickHouse/ClickHouse/issues/62018 - # - '24.3' + - '24.3' steps: - name: Checkout project uses: actions/checkout@v4 diff --git a/ChangeLog.md b/ChangeLog.md index a138e15f..2c984a56 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -9,6 +9,7 @@ IMPROVEMENTS - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) - added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid - switched to golang 1.22 +- added `clickhouse/clickhouse-server:24.3` to CI/CD BUG FIXES - continue `S3_MAX_PARTS_COUNT` default value from `2000` to `4000` to continue decrease memory usage for S3 diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 92a00b03..b1f84e6e 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -251,13 +251,15 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro return nil, ctx.Err() default: type DiskFields struct { - DiskTypePresent uint64 `ch:"is_disk_type_present"` - FreeSpacePresent uint64 `ch:"is_free_space_present"` - StoragePolicyPresent uint64 `ch:"is_storage_policy_present"` + DiskTypePresent uint64 `ch:"is_disk_type_present"` + ObjectStorageTypePresent uint64 `ch:"is_object_storage_type_present"` + FreeSpacePresent uint64 `ch:"is_free_space_present"` + StoragePolicyPresent uint64 `ch:"is_storage_policy_present"` } diskFields := make([]DiskFields, 0) if err := ch.SelectContext(ctx, &diskFields, "SELECT countIf(name='type') AS is_disk_type_present, "+ + "countIf(name='object_storage_type') AS is_object_storage_type_present, "+ "countIf(name='free_space') AS is_free_space_present, "+ "countIf(name='disks') AS is_storage_policy_present "+ "FROM system.columns WHERE database='system' AND table IN ('disks','storage_policies') ", @@ -268,6 +270,10 
@@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro if len(diskFields) > 0 && diskFields[0].DiskTypePresent > 0 { diskTypeSQL = "any(d.type)" } + if len(diskFields) > 0 && diskFields[0].ObjectStorageTypePresent > 0 { + diskTypeSQL = "any(lower(if(d.type='ObjectStorage',d.object_storage_type,d.type)))" + } + diskFreeSpaceSQL := "toUInt64(0)" if len(diskFields) > 0 && diskFields[0].FreeSpacePresent > 0 { diskFreeSpaceSQL = "min(d.free_space)" From b6b5fc46f217424c7ddc4d435fbab1f36ec54804 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 29 Mar 2024 21:55:29 +0400 Subject: [PATCH 53/80] fix testflows for 24.3, workaround for https://github.com/ClickHouse/ClickHouse/issues/62092 --- test/testflows/clickhouse_backup/tests/cloud_storage.py | 4 ++-- test/testflows/clickhouse_backup/tests/steps.py | 4 ++-- test/testflows/clickhouse_backup/tests/views.py | 6 +++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/test/testflows/clickhouse_backup/tests/cloud_storage.py b/test/testflows/clickhouse_backup/tests/cloud_storage.py index f5449358..4c636c5b 100644 --- a/test/testflows/clickhouse_backup/tests/cloud_storage.py +++ b/test/testflows/clickhouse_backup/tests/cloud_storage.py @@ -48,7 +48,7 @@ def incremental_remote_storage(self): time.sleep(10) with And("I save table contents"): - contents_before = clickhouse.query(f"SELECT * FROM {table_name}").output.split('\n') + contents_before = clickhouse.query(f"SELECT * FROM {table_name} FORMAT TSVRaw").output.split('\n') with And("I do create_remote --diff-from-remote"): backup.cmd(f"clickhouse-backup create_remote --diff-from-remote={table_name}_1 {table_name}_2") @@ -72,7 +72,7 @@ def incremental_remote_storage(self): assert table_name in r, error() with And("I check table contents are restored"): - contents_after = clickhouse.query(f"SELECT * FROM {table_name} ORDER BY OrderBy").output.split('\n') + contents_after = clickhouse.query(f"SELECT * FROM {table_name} ORDER BY OrderBy FORMAT TSVRaw").output.split('\n') for line in contents_after: assert line in contents_before, error() diff --git a/test/testflows/clickhouse_backup/tests/steps.py b/test/testflows/clickhouse_backup/tests/steps.py index 94695c36..f6cea81d 100644 --- a/test/testflows/clickhouse_backup/tests/steps.py +++ b/test/testflows/clickhouse_backup/tests/steps.py @@ -117,8 +117,8 @@ def populate_table(self, node, table_name, columns, database="default", size=10, str_random_schema = ", ".join(random_schema) str_insert_columns = ", ".join(insert_columns) - - node.query(f"INSERT INTO {database}.{table_name} ({str_insert_columns}) SELECT * FROM generateRandom('{str_random_schema}', NULL, 10, 2) LIMIT {size}") + # @todo return NULL to random_seed generateRandom parameter when resolve https://github.com/ClickHouse/ClickHouse/issues/62092 + node.query(f"INSERT INTO {database}.{table_name} ({str_insert_columns}) SELECT * FROM generateRandom('{str_random_schema}', {random.randint(1,10000)}, 10, 2) LIMIT {size}") @TestStep(Given) diff --git a/test/testflows/clickhouse_backup/tests/views.py b/test/testflows/clickhouse_backup/tests/views.py index 2cc97eb9..888f1194 100644 --- a/test/testflows/clickhouse_backup/tests/views.py +++ b/test/testflows/clickhouse_backup/tests/views.py @@ -97,7 +97,11 @@ def window_view(self): """Test that window view is handled properly by clickhouse-backup. 
""" base_table_name = self.context.views_base_name - if os.environ.get('CLICKHOUSE_VERSION', '22.8') >= '22.6': + if os.environ.get('CLICKHOUSE_VERSION', '24.3') >= '24.3': + create_query = f"CREATE WINDOW VIEW {base_table_name}_wview " \ + f"ENGINE AggregatingMergeTree() ORDER BY t AS SELECT count(Version) v, tumbleStart(w_id) t " \ + f"FROM default.{base_table_name} GROUP BY tumble(Time, INTERVAL '10' SECOND) AS w_id SETTINGS allow_experimental_analyzer=0" + elif os.environ.get('CLICKHOUSE_VERSION', '24.3') >= '22.6': create_query = f"CREATE WINDOW VIEW {base_table_name}_wview " \ f"ENGINE AggregatingMergeTree() ORDER BY t AS SELECT count(Version) v, tumbleStart(w_id) t " \ f"FROM default.{base_table_name} GROUP BY tumble(Time, INTERVAL '10' SECOND) AS w_id" From 8bce1e7e841edb94db7de24ff518c6c43ea73d9c Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 30 Mar 2024 22:06:43 +0400 Subject: [PATCH 54/80] fix TestRBAC for 24.3 --- pkg/backup/create.go | 24 +++++++++++++----------- pkg/backup/download.go | 2 +- pkg/backup/upload.go | 2 +- pkg/clickhouse/clickhouse.go | 2 +- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 8eec9a29..26e6aeda 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -332,19 +332,21 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBac if !schemaOnly && !doBackupData { backupDataSize = append(backupDataSize, clickhouse.BackupDataSize{Size: 0}) } - l := 0 - for _, table := range tables { - if !table.Skip { - l += 1 - } - } - if l == 0 && (schemaOnly || doBackupData) { - return fmt.Errorf("`use_embedded_backup_restore: true` not found tables for backup, check your parameter --tables=%v", tablePattern) - } - tablesTitle := make([]metadata.TableTitle, l) + var tablesTitle []metadata.TableTitle if schemaOnly || doBackupData { - if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + l := 0 + for _, table := range tables { + if !table.Skip { + l += 1 + } + } + if l == 0 { + return fmt.Errorf("`use_embedded_backup_restore: true` not found tables for backup, check your parameter --tables=%v", tablePattern) + } + tablesTitle = make([]metadata.TableTitle, l) + + if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; b.cfg.ClickHouse.EmbeddedBackupDisk != "" && !isBackupDiskExists { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } if b.cfg.ClickHouse.EmbeddedBackupDisk == "" { diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 426f6258..babba1b0 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -262,7 +262,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ backupMetadata.DataSize = dataSize backupMetadata.MetadataSize = metadataSize - if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" && backupMetadata.Tables != nil && len(backupMetadata.Tables) > 0 { localClickHouseBackupFile := path.Join(b.EmbeddedBackupDataPath, backupName, ".backup") remoteClickHouseBackupFile := path.Join(backupName, ".backup") if err = b.downloadSingleBackupFile(ctx, remoteClickHouseBackupFile, localClickHouseBackupFile, disks); err != nil { diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index d0f5f3ef..0e79649c 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -217,7 +217,7 @@ 
func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr return fmt.Errorf("can't upload %s: %v", remoteBackupMetaFile, err) } } - if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { + if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" && backupMetadata.Tables != nil && len(backupMetadata.Tables) > 0 { localClickHouseBackupFile := path.Join(b.EmbeddedBackupDataPath, backupName, ".backup") remoteClickHouseBackupFile := path.Join(backupName, ".backup") if err = b.uploadSingleBackupFile(ctx, localClickHouseBackupFile, remoteClickHouseBackupFile); err != nil { diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index b1f84e6e..f1274442 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -656,7 +656,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { func (ch *ClickHouse) GetVersionDescribe(ctx context.Context) string { var result string - query := "SELECT value FROM `system`.`build_options` where name='VERSION_DESCRIBE'" + query := "SELECT value FROM `system`.`build_options` WHERE name='VERSION_DESCRIBE'" if err := ch.SelectSingleRow(ctx, &result, query); err != nil { return "" } From 6b233744ad87c07060207b05c43ab19398c4730e Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 31 Mar 2024 13:27:57 +0400 Subject: [PATCH 55/80] fix TestIntegrationAzure for 24.3 --- pkg/backup/backuper.go | 2 +- pkg/backup/restore.go | 10 ++++++++-- pkg/storage/object_disk/object_disk.go | 10 +++++----- test/integration/install_delve.sh | 1 + 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 9cae517f..06c9876e 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -161,7 +161,7 @@ func (b *Backuper) populateBackupShardField(ctx context.Context, tables []clickh } func (b *Backuper) isDiskTypeObject(diskType string) bool { - return diskType == "s3" || diskType == "azure_blob_storage" + return diskType == "s3" || diskType == "azure_blob_storage" || diskType == "azure" } func (b *Backuper) isDiskTypeEncryptedObject(disk clickhouse.Disk, disks []clickhouse.Disk) bool { diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index e1a3b42a..b5be0bbb 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -543,7 +543,6 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesF "operation": "restore_schema", }) startRestoreSchema := time.Now() - if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { return dropErr } @@ -566,6 +565,13 @@ var emptyReplicatedMergeTreeRE = regexp.MustCompile(`(?m)Replicated(MergeTree|Re func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, version int) error { var err error + if tablesForRestore == nil || len(tablesForRestore) == 0 { + if !b.cfg.General.AllowEmptyBackups { + return fmt.Errorf("no tables for restore") + } + b.log.Warnf("no tables for restore in embeddded backup %s/metadata.json", backupName) + return nil + } if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { err = b.fixEmbeddedMetadataLocal(ctx, backupName, version) } else { @@ -1083,7 +1089,7 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin } else if b.cfg.General.RemoteStorage == "gcs" && (diskType == "s3" || diskType == "encrypted") { srcBucket = b.cfg.GCS.Bucket srcKey = path.Join(b.cfg.GCS.ObjectDiskPath, srcBackupName, srcDiskName, 
storageObject.ObjectRelativePath) - } else if b.cfg.General.RemoteStorage == "azblob" && (diskType == "azure_blob_storage" || diskType == "encrypted") { + } else if b.cfg.General.RemoteStorage == "azblob" && (diskType == "azure_blob_storage" || diskType == "azure" || diskType == "encrypted") { srcBucket = b.cfg.AzureBlob.Container srcKey = path.Join(b.cfg.AzureBlob.ObjectDiskPath, srcBackupName, srcDiskName, storageObject.ObjectRelativePath) } else { diff --git a/pkg/storage/object_disk/object_disk.go b/pkg/storage/object_disk/object_disk.go index 623c5124..29621b44 100644 --- a/pkg/storage/object_disk/object_disk.go +++ b/pkg/storage/object_disk/object_disk.go @@ -204,7 +204,7 @@ func (c *ObjectStorageConnection) GetRemoteStorage() storage.RemoteStorage { switch c.Type { case "s3": return c.S3 - case "azure_blob_storage": + case "azure", "azure_blob_storage": return c.AzureBlob } apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) @@ -215,7 +215,7 @@ func (c *ObjectStorageConnection) GetRemoteBucket() string { switch c.Type { case "s3": return c.S3.Config.Bucket - case "azure_blob_storage": + case "azure", "azure_blob_storage": return c.AzureBlob.Config.Container } apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) @@ -226,7 +226,7 @@ func (c *ObjectStorageConnection) GetRemotePath() string { switch c.Type { case "s3": return c.S3.Config.Path - case "azure_blob_storage": + case "azure", "azure_blob_storage": return c.AzureBlob.Config.Path } apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) @@ -356,7 +356,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) e } DisksCredentials.Store(diskName, creds) break - case "azure_blob_storage": + case "azure", "azure_blob_storage": creds := ObjectStorageCredentials{ Type: "azblob", } @@ -427,7 +427,7 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf if !exists { return nil, fmt.Errorf("%s is not presnet in object_disk.SystemDisks", diskName) } - if disk.Type != "s3" && disk.Type != "s3_plain" && disk.Type != "azure_blob_storage" && disk.Type != "encrypted" { + if disk.Type != "s3" && disk.Type != "s3_plain" && disk.Type != "azure_blob_storage" && disk.Type != "azure" && disk.Type != "encrypted" { return nil, fmt.Errorf("%s have unsupported type %s", diskName, disk.Type) } connection.MetadataPath = disk.Path diff --git a/test/integration/install_delve.sh b/test/integration/install_delve.sh index 8a2263e0..ad8fb5f6 100755 --- a/test/integration/install_delve.sh +++ b/test/integration/install_delve.sh @@ -18,6 +18,7 @@ ln -nsfv /usr/lib/go-1.22/bin/go /usr/bin/go CGO_ENABLED=0 GO111MODULE=on go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest # GO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -tags osusergo,netgo -gcflags "all=-N -l" -ldflags "-extldflags '-static' -X 'main.version=debug'" -o build/linux/amd64/clickhouse-backup ./cmd/clickhouse-backup +# /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-backup/config-azblob.yml restore --schema TestIntegrationAzure_full_6516689450475708573 # /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-server/config.d/ch-backup.yaml upload debug_upload --table # USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 
--accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup # /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- download test_rbac_backup From cbef4461032159c4caea295d7b815879b3c31f40 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 31 Mar 2024 14:44:36 +0400 Subject: [PATCH 56/80] explicitly delete local backup during upload, fix https://github.com/Altinity/clickhouse-backup/issues/777 --- ChangeLog.md | 2 +- cmd/clickhouse-backup/main.go | 4 ++-- pkg/backup/upload.go | 21 ++++++++++++++++++--- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 2c984a56..c0f658ac 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,6 +1,6 @@ # v2.5.0 (not released yet) IMPROVEMENTS -- added `--delete-source` parameter for `upload` and `create_remote` commands to explicitly delete local backup after upload, fix [777](https://github.com/Altinity/clickhouse-backup/issues/777) +- added `--delete-source` parameter for `upload` and `create_remote` commands to explicitly delete local backup during upload, fix [777](https://github.com/Altinity/clickhouse-backup/issues/777) - added support for `--env ENV_NAME=value` cli parameter for allow dynamically override any config parameter, fix [821](https://github.com/Altinity/clickhouse-backup/issues/821) - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695) - `--rbac, --rbac-only, --configs, --configs-only` now works with `use_embedded_backup_restore: true` diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index ea4f02aa..fb8b23ce 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -224,7 +224,7 @@ func main() { cli.BoolFlag{ Name: "delete, delete-source, delete-local", Hidden: false, - Usage: "explicitly delete local backup after upload", + Usage: "explicitly delete local backup during upload", }, ), }, @@ -275,7 +275,7 @@ func main() { cli.BoolFlag{ Name: "delete, delete-source, delete-local", Hidden: false, - Usage: "explicitly delete local backup after upload", + Usage: "explicitly delete local backup during upload", }, ), }, diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 0e79649c..8d9ee267 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -153,7 +153,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr if !schemaOnly && (!b.isEmbedded || b.cfg.ClickHouse.EmbeddedBackupDisk != "") { var files map[string][]string var err error - files, uploadedBytes, err = b.uploadTableData(uploadCtx, backupName, tablesForUpload[idx]) + files, uploadedBytes, err = b.uploadTableData(uploadCtx, backupName, deleteSource, tablesForUpload[idx]) if err != nil { return err } @@ -488,7 +488,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate return uint64(remoteUploaded.Size()), nil } -func (b *Backuper) uploadTableData(ctx context.Context, backupName string, table metadata.TableMetadata) (map[string][]string, int64, error) { +func (b *Backuper) uploadTableData(ctx context.Context, backupName string, deleteSource bool, table metadata.TableMetadata) (map[string][]string, int64, error) { dbAndTablePath := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) uploadedFiles := 
map[string][]string{} capacity := 0 @@ -547,7 +547,14 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, table b.resumableState.AppendToState(remotePathFull, uploadPathBytes) } } - log.Debugf("finish upload %d files to %s", len(partFiles), remotePath) + // https://github.com/Altinity/clickhouse-backup/issues/777 + if deleteSource { + for _, f := range partFiles { + if err := os.Remove(path.Join(backupPath, f)); err != nil { + return fmt.Errorf("can't remove %s, %v", path.Join(backupPath, f), err) + } + } + } return nil }) } else { @@ -585,6 +592,14 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, table if b.resume { b.resumableState.AppendToState(remoteDataFile, remoteFile.Size()) } + // https://github.com/Altinity/clickhouse-backup/issues/777 + if deleteSource { + for _, f := range localFiles { + if err = os.Remove(path.Join(backupPath, f)); err != nil { + return fmt.Errorf("can't remove %s, %v", path.Join(backupPath, f), err) + } + } + } log.Debugf("finish upload to %s", remoteDataFile) return nil }) From 453ee6530baaf7b3b78f87281334bacb43470bc2 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 31 Mar 2024 20:13:09 +0400 Subject: [PATCH 57/80] added `--remote-backup` cli parameter to `tables` command and `GET /backup/table`, fix https://github.com/Altinity/clickhouse-backup/issues/778 --- ChangeLog.md | 1 + ReadMe.md | 4 +- cmd/clickhouse-backup/main.go | 9 ++- pkg/backup/list.go | 112 +++++++++++++++++++++++++-- pkg/server/server.go | 12 ++- test/integration/integration_test.go | 41 +++++++++- 6 files changed, 164 insertions(+), 15 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index c0f658ac..c6a78ed6 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -8,6 +8,7 @@ IMPROVEMENTS - added `--diff-from-remote` parameter for `create` command, will copy only new data parts object disk data, also allows to download properly object disk data from required backup during `restore`, fix [865](https://github.com/Altinity/clickhouse-backup/issues/865) - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) - added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid +- added `--remote-backup` cli parameter to `tables` command and `GET /backup/table`, fix [778](https://github.com/Altinity/clickhouse-backup/issues/778) - switched to golang 1.22 - added `clickhouse/clickhouse-server:24.3` to CI/CD diff --git a/ReadMe.md b/ReadMe.md index fb36f3a4..bad6388e 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -655,13 +655,15 @@ Kill selected command from `GET /backup/actions` command list, kill process shou Print list of tables: `curl -s localhost:7171/backup/tables | jq .`, exclude pattern matched tables from `skip_tables` configuration parameters -- Optional query argument `table` works the same as the `--table value` CLI argument. +- Optional query argument `table` works the same as the `--table=pattern` CLI argument. +- Optional query argument `remote_backup`works the same as `--remote-backup=name` CLI argument. ### GET /backup/tables/all Print list of tables: `curl -s localhost:7171/backup/tables/all | jq .`, ignore `skip_tables` configuration parameters. - Optional query argument `table` works the same as the `--table value` CLI argument. +- Optional query argument `remote_backup`works the same as `--remote-backup=name` CLI argument. 
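A minimal usage sketch for the new `remote_backup` argument (backup name and table pattern are placeholders):

```bash
# Hypothetical example: list tables stored in remote backup "my_remote_backup",
# optionally filtered by a table pattern; /backup/tables/all ignores skip_tables.
curl -s "localhost:7171/backup/tables?remote_backup=my_remote_backup&table=shop.*" | jq .
curl -s "localhost:7171/backup/tables/all?remote_backup=my_remote_backup" | jq .
```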
### POST /backup/create diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index fb8b23ce..8a04dbd9 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -71,10 +71,10 @@ func main() { { Name: "tables", Usage: "List of tables, exclude skip_tables", - UsageText: "clickhouse-backup tables [-t, --tables=.
]] [--all]", + UsageText: "clickhouse-backup tables [--tables=.
] [--remote-backup=] [--all]", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.PrintTables(c.Bool("all"), c.String("table")) + return b.PrintTables(c.Bool("all"), c.String("table"), c.String("remote-backup")) }, Flags: append(cliapp.Flags, cli.BoolFlag{ @@ -87,6 +87,11 @@ func main() { Hidden: false, Usage: "List tables only match with table name patterns, separated by comma, allow ? and * as wildcard", }, + cli.StringFlag{ + Name: "remote-backup", + Hidden: false, + Usage: "List tables from remote backup", + }, ), }, { diff --git a/pkg/backup/list.go b/pkg/backup/list.go index c83aea6c..5bf13d78 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -8,6 +8,7 @@ import ( "io" "os" "path" + "path/filepath" "sort" "strings" "text/tabwriter" @@ -46,7 +47,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) fmt.Println(backupList[len(backupList)-1].BackupName) case "penult", "prev", "previous", "p": if len(backupList) < 2 { - return fmt.Errorf("no penult backup is found") + return fmt.Errorf("no previous backup is found") } fmt.Println(backupList[len(backupList)-2].BackupName) case "all", "": @@ -402,14 +403,33 @@ func (b *Backuper) GetTables(ctx context.Context, tablePattern string) ([]clickh } // PrintTables - print all tables suitable for backup -func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { +func (b *Backuper) PrintTables(printAll bool, tablePattern, remoteBackup string) error { + var err error ctx, cancel, _ := status.Current.GetContextWithCancel(status.NotFromAPI) defer cancel() - if err := b.ch.Connect(); err != nil { + if err = b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } defer b.ch.Close() - log := b.log.WithField("logger", "PrintTables") + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.DiscardEmptyColumns) + if remoteBackup == "" { + if err = b.printTablesLocal(ctx, tablePattern, printAll, w); err != nil { + return err + } + } else { + if err = b.printTablesRemote(ctx, remoteBackup, tablePattern, printAll, w); err != nil { + return err + } + + } + if err := w.Flush(); err != nil { + b.log.Errorf("can't flush tabular writer error: %v", err) + } + return nil +} + +func (b *Backuper) printTablesLocal(ctx context.Context, tablePattern string, printAll bool, w *tabwriter.Writer) error { + log := b.log.WithField("logger", "PrintTablesLocal") allTables, err := b.GetTables(ctx, tablePattern) if err != nil { return err @@ -418,7 +438,6 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { if err != nil { return err } - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.DiscardEmptyColumns) for _, table := range allTables { if table.Skip && !printAll { continue @@ -437,8 +456,87 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } - if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + return nil +} + +func (b *Backuper) GetTablesRemote(ctx context.Context, backupName string, tablePattern string) ([]clickhouse.Table, error) { + if !b.ch.IsOpen { + if err := b.ch.Connect(); err != nil { + return []clickhouse.Table{}, fmt.Errorf("can't connect to clickhouse: %v", err) + } + defer b.ch.Close() + } + if b.cfg.General.RemoteStorage == "none" || b.cfg.General.RemoteStorage == "custom" { + return nil, fmt.Errorf("GetTablesRemote does not support `none` and 
`custom` remote storage") + } + if b.dst == nil { + bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, false, "") + if err != nil { + return nil, err + } + err = bd.Connect(ctx) + if err != nil { + return nil, fmt.Errorf("can't connect to remote storage: %v", err) + } + defer func() { + if err := bd.Close(ctx); err != nil { + b.log.Warnf("can't close BackupDestination error: %v", err) + } + }() + + b.dst = bd } + backupList, err := b.dst.BackupList(ctx, true, backupName) + if err != nil { + return nil, err + } + + var tables []clickhouse.Table + tablePatterns := []string{"*"} + + if tablePattern != "" { + tablePatterns = strings.Split(tablePattern, ",") + } + + for _, remoteBackup := range backupList { + if remoteBackup.BackupName == backupName { + for _, t := range remoteBackup.Tables { + isInformationSchema := IsInformationSchema(t.Database) + tableName := fmt.Sprintf("%s.%s", t.Database, t.Table) + shallSkipped := b.shouldSkipByTableName(tableName) + matched := false + for _, p := range tablePatterns { + if matched, _ = filepath.Match(strings.Trim(p, " \t\r\n"), tableName); matched { + break + } + } + tables = append(tables, clickhouse.Table{ + Database: t.Database, + Name: t.Table, + Skip: !matched || (isInformationSchema || shallSkipped), + }) + } + } + } + + return tables, nil +} + +// printTablesRemote https://github.com/Altinity/clickhouse-backup/issues/778 +func (b *Backuper) printTablesRemote(ctx context.Context, backupName string, tablePattern string, printAll bool, w *tabwriter.Writer) error { + tables, err := b.GetTablesRemote(ctx, backupName, tablePattern) + if err != nil { + return err + } + + for _, t := range tables { + if t.Skip && !printAll { + continue + } + if bytes, err := fmt.Fprintf(w, "%s.%s\tskip=%v\n", t.Database, t.Name, t.Skip); err != nil { + b.log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + } + } + return nil } diff --git a/pkg/server/server.go b/pkg/server/server.go index 2c8fa77a..4b4b3174 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -656,16 +656,22 @@ func (api *APIServer) httpTablesHandler(w http.ResponseWriter, r *http.Request) } b := backup.NewBackuper(cfg) q := r.URL.Query() - tables, err := b.GetTables(context.Background(), q.Get("table")) + var tables []clickhouse.Table + // https://github.com/Altinity/clickhouse-backup/issues/778 + if q.Get("remote_backup") != "" { + tables, err = b.GetTablesRemote(context.Background(), q.Get("remote_backup"), q.Get("table")) + } else { + tables, err = b.GetTables(context.Background(), q.Get("table")) + } if err != nil { api.writeError(w, http.StatusInternalServerError, "tables", err) return } - if r.URL.Path != "/backup/tables/all" { - tables := api.getTablesWithSkip(tables) + if r.URL.Path == "/backup/tables/all" { api.sendJSONEachRow(w, http.StatusOK, tables) return } + tables = api.getTablesWithSkip(tables) api.sendJSONEachRow(w, http.StatusOK, tables) } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index f6aff7cf..e08757e5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -692,11 +692,13 @@ func TestServerAPI(t *testing.T) { testAPIBackupTables(r) + testAPIBackupUpload(r) + + testAPIBackupTablesRemote(r) + log.Info("Check /backup/actions") ch.queryWithNoError(r, "SELECT count() FROM system.backup_actions") - testAPIBackupUpload(r) - testAPIBackupList(t, r) testAPIDeleteLocalDownloadRestore(r) @@ -997,6 +999,25 @@ func testAPIBackupTables(r *require.Assertions) { } } +func 
testAPIBackupTablesRemote(r *require.Assertions) { + + log.Info("Check /backup/tables?remote_backup=z_backup_1") + out, err := dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables?remote_backup=z_backup_1\"", + ) + log.Debug(out) + r.NoError(err) + r.Contains(out, "long_schema") + r.NotContains(out, "system") + r.NotContains(out, "Connection refused") + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + r.NotContains(out, "INFORMATION_SCHEMA") + r.NotContains(out, "information_schema") + +} + func testAPIBackupCreate(r *require.Assertions) { log.Info("Check /backup/create") out, err := dockerExecOut( @@ -1277,8 +1298,24 @@ func TestTablePatterns(t *testing.T) { generateTestData(t, r, ch, "S3", defaultTestData) if createPattern { r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + out, err := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) + r.NoError(err) + r.Contains(out, dbNameOrdinaryTest) + r.NotContains(out, dbNameAtomicTest) + out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) + r.NoError(err) + r.Contains(out, dbNameOrdinaryTest) + r.NotContains(out, dbNameAtomicTest) } else { r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) + out, err := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", testBackupName) + r.NoError(err) + r.Contains(out, dbNameOrdinaryTest) + r.Contains(out, dbNameAtomicTest) + out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, testBackupName) + r.NoError(err) + r.Contains(out, dbNameOrdinaryTest) + r.Contains(out, dbNameAtomicTest) } r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName)) From a9dcc5031da99efaaea4bfe9f9abb76dfb2c4c89 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 3 Apr 2024 20:47:36 +0400 Subject: [PATCH 58/80] - added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix https://github.com/Altinity/clickhouse-backup/issues/851 --- ChangeLog.md | 2 + Dockerfile | 9 +- ReadMe.md | 10 +- cmd/clickhouse-backup/main.go | 2 +- pkg/backup/create.go | 5 +- pkg/backup/restore.go | 342 +++++++++++++++++++++++---- pkg/backup/restore_remote.go | 4 +- pkg/backup/restore_test.go | 85 +++++++ pkg/clickhouse/structs.go | 9 + pkg/config/config.go | 4 + pkg/keeper/keeper.go | 48 +++- pkg/server/server.go | 8 +- test/integration/integration_test.go | 37 +-- 13 files changed, 489 insertions(+), 76 deletions(-) create mode 100644 pkg/backup/restore_test.go diff --git a/ChangeLog.md b/ChangeLog.md index c6a78ed6..04b788a8 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -9,6 +9,8 @@ IMPROVEMENTS - added support of native Clickhouse incremental backup for `use_embedded_backup_restore: true` fix [735](https://github.com/Altinity/clickhouse-backup/issues/735) - 
added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid - added `--remote-backup` cli parameter to `tables` command and `GET /backup/table`, fix [778](https://github.com/Altinity/clickhouse-backup/issues/778) +- added `rbac_always_backup: true` option to default config, will create backup for RBAC objects automatically, restore still require `--rbac` to avoid destructive actions, fix [793](https://github.com/Altinity/clickhouse-backup/issues/793) +- added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix [851](https://github.com/Altinity/clickhouse-backup/issues/851) - switched to golang 1.22 - added `clickhouse/clickhouse-server:24.3` to CI/CD diff --git a/Dockerfile b/Dockerfile index 85fbca81..13599b21 100644 --- a/Dockerfile +++ b/Dockerfile @@ -76,16 +76,17 @@ COPY --from=builder-fips /src/build/ /src/build/ CMD /src/build/${TARGETPLATFORM}/clickhouse-backup-fips --help -FROM alpine:3.18 AS image_short +FROM alpine:3.19 AS image_short ARG TARGETPLATFORM MAINTAINER Eugene Klimov RUN addgroup -S -g 101 clickhouse \ && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse -RUN apk update && apk add --no-cache ca-certificates tzdata bash curl && update-ca-certificates +RUN apk update && apk add --no-cache ca-certificates tzdata bash curl libcap-setcap && update-ca-certificates COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh COPY build/${TARGETPLATFORM}/clickhouse-backup /bin/clickhouse-backup RUN chmod +x /bin/clickhouse-backup +RUN setcap cap_sys_nice=+ep /bin/clickhouse-backup # USER clickhouse ENTRYPOINT ["/entrypoint.sh"] CMD [ "/bin/clickhouse-backup", "--help" ] @@ -96,6 +97,7 @@ ARG TARGETPLATFORM MAINTAINER Eugene Klimov COPY build/${TARGETPLATFORM}/clickhouse-backup-fips /bin/clickhouse-backup RUN chmod +x /bin/clickhouse-backup +RUN setcap cap_sys_nice=+ep /bin/clickhouse-backup FROM ${CLICKHOUSE_IMAGE}:${CLICKHOUSE_VERSION} AS image_full @@ -106,7 +108,7 @@ RUN apt-get update && apt-get install -y gpg xxd bsdmainutils parallel && wget - echo "deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main" > /etc/apt/sources.list.d/kopia.list && \ wget -c "https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)" -O /usr/bin/yq && chmod +x /usr/bin/yq && \ apt-get update -y && \ - apt-get install -y ca-certificates tzdata bash curl restic rsync rclone jq gpg kopia && \ + apt-get install -y ca-certificates tzdata bash curl restic rsync rclone jq gpg kopia libcap2-bin && \ update-ca-certificates && \ rm -rf /var/lib/apt/lists/* && rm -rf /var/cache/apt/* @@ -114,6 +116,7 @@ COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh COPY build/${TARGETPLATFORM}/clickhouse-backup /bin/clickhouse-backup RUN chmod +x /bin/clickhouse-backup +RUN setcap cap_sys_nice=+ep /bin/clickhouse-backup # USER clickhouse diff --git a/ReadMe.md b/ReadMe.md index bad6388e..90611616 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -414,6 +414,9 @@ general: cpu_nice_priority: 15 # CPU niceness priority, to allow throttling СЗГ intensive operation, more details https://manpages.ubuntu.com/manpages/xenial/man1/nice.1.html io_nice_priority: "idle" # IO niceness priority, to allow throttling disk intensive operation, more details https://manpages.ubuntu.com/manpages/xenial/man1/ionice.1.html + + rbac_backup_always: true # always, backup 
RBAC objects + rbac_resolve_conflicts: "recreate" # action, when RBAC object with the same name already exists, allow "recreate", "ignore", "fail" values clickhouse: username: default # CLICKHOUSE_USERNAME password: "" # CLICKHOUSE_PASSWORD @@ -456,11 +459,14 @@ clickhouse: # available prefixes # - sql: will execute SQL query # - exec: will execute command via shell - restart_command: "sql:SYSTEM SHUTDOWN" + restart_command: "exec:systemctl restart clickhouse-server" ignore_not_exists_error_during_freeze: true # CLICKHOUSE_IGNORE_NOT_EXISTS_ERROR_DURING_FREEZE, helps to avoid backup failures when running frequent CREATE / DROP tables and databases during backup, `clickhouse-backup` will ignore `code: 60` and `code: 81` errors during execution of `ALTER TABLE ... FREEZE` check_replicas_before_attach: true # CLICKHOUSE_CHECK_REPLICAS_BEFORE_ATTACH, helps avoiding concurrent ATTACH PART execution when restoring ReplicatedMergeTree tables use_embedded_backup_restore: false # CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE, use BACKUP / RESTORE SQL statements instead of regular SQL queries to use features of modern ClickHouse server versions - backup_mutations: true # CLICKHOUSE_BACKUP_MUTATIONS, allow backup mutations from system.mutations WHERE is_done AND apply it during restore + embedded_backup_disk: "" # CLICKHOUSE_EMBEDDED_BACKUP_DISK - disk from system.disks which will use when `use_embedded_backup_restore: true` + embedded_backup_threads: 0 # CLICKHOUSE_EMBEDDED_BACKUP_THREADS - how many threads will use for BACKUP sql command when `use_embedded_backup_restore: true`, 0 means - equal available CPU cores + embedded_restore_threads: 0 # CLICKHOUSE_EMBEDDED_RESTORE_THREADS - how many threads will use for RESTORE sql command when `use_embedded_backup_restore: true`, 0 means - equal available CPU cores + backup_mutations: true # CLICKHOUSE_BACKUP_MUTATIONS, allow backup mutations from system.mutations WHERE is_done=0 and apply it during restore restore_as_attach: false # CLICKHOUSE_RESTORE_AS_ATTACH, allow restore tables which have inconsistent data parts structure and mutations in progress check_parts_columns: true # CLICKHOUSE_CHECK_PARTS_COLUMNS, check data types from system.parts_columns during create backup to guarantee mutation is complete max_connections: 0 # CLICKHOUSE_MAX_CONNECTIONS, how many parallel connections could be opened during operations diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 8a04dbd9..f7f7cad4 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -336,7 +336,7 @@ func main() { UsageText: "clickhouse-backup restore [-t, --tables=.
] [-m, --restore-database-mapping=:[,<...>]] [--partitions=] [-s, --schema] [-d, --data] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] ", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Int("command-id")) + return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("schema"), c.Bool("data"), c.Bool("drop"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 26e6aeda..b54608d8 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -81,6 +81,9 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, if skipCheckPartsColumns && b.cfg.ClickHouse.CheckPartsColumns { b.cfg.ClickHouse.CheckPartsColumns = false } + if b.cfg.General.RBACBackupAlways { + createRBAC = true + } allDatabases, err := b.ch.GetDatabases(ctx, b.cfg, tablePattern) if err != nil { @@ -645,7 +648,7 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st rbacDataSize := uint64(0) if err = b.ch.SelectContext(ctx, &replicatedRBAC, "SELECT name FROM system.user_directories WHERE type='replicated'"); err == nil && len(replicatedRBAC) > 0 { k := keeper.Keeper{Log: b.log.WithField("logger", "keeper")} - if err = k.Connect(ctx, b.ch, b.cfg); err != nil { + if err = k.Connect(ctx, b.ch); err != nil { return 0, err } defer k.Close() diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index b5be0bbb..796be57d 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -1,6 +1,7 @@ package backup import ( + "bufio" "context" "encoding/json" "fmt" @@ -38,7 +39,7 @@ import ( var CreateDatabaseRE = regexp.MustCompile(`(?m)^CREATE DATABASE (\s*)(\S+)(\s*)`) // Restore - restore tables matched by tablePattern from backupName -func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly bool, commandId int) error { +func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly bool, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -119,7 +120,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par for _, database := range backupMetadata.Databases { targetDB := database.Name if !IsInformationSchema(targetDB) { - if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropTable, schemaOnly, ignoreDependencies, version); err != nil { + if err = b.restoreEmptyDatabase(ctx, targetDB, tablePattern, database, dropExists, schemaOnly, ignoreDependencies, version); err != nil { return err } } @@ -139,7 +140,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } needRestart := false if rbacOnly || restoreRBAC { - if err := b.restoreRBAC(ctx, backupName, disks); err != 
nil { + if err := b.restoreRBAC(ctx, backupName, disks, version, dropExists); err != nil { return err } log.Infof("RBAC successfully restored") @@ -193,12 +194,12 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if !rbacOnly && !configsOnly { - tablesForRestore, partitionsNames, err = b.getTablesForRestoreLocal(ctx, backupName, metadataPath, tablePattern, dropTable, partitions) + tablesForRestore, partitionsNames, err = b.getTablesForRestoreLocal(ctx, backupName, metadataPath, tablePattern, dropExists, partitions) if err != nil { return err } } - if schemaOnly || dropTable || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { + if schemaOnly || dropExists || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { if err = b.RestoreSchema(ctx, backupName, tablesForRestore, ignoreDependencies, version); err != nil { return err } @@ -372,12 +373,27 @@ func (b *Backuper) prepareRestoreDatabaseMapping(databaseMapping []string) error } // restoreRBAC - copy backup_name>/rbac folder to access_data_path -func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk) error { +func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk, version int, dropExists bool) error { log := b.log.WithField("logger", "restoreRBAC") accessPath, err := b.ch.GetAccessManagementPath(ctx, nil) if err != nil { return err } + var k *keeper.Keeper + replicatedUserDirectories := make([]clickhouse.UserDirectory, 0) + if err = b.ch.SelectContext(ctx, &replicatedUserDirectories, "SELECT name FROM system.user_directories WHERE type='replicated'"); err == nil && len(replicatedUserDirectories) > 0 { + k = &keeper.Keeper{Log: b.log.WithField("logger", "keeper")} + if connErr := k.Connect(ctx, b.ch); connErr != nil { + return fmt.Errorf("but can't connect to keeper: %v", connErr) + } + defer k.Close() + } + + // https://github.com/Altinity/clickhouse-backup/issues/851 + if err = b.restoreRBACResolveAllConflicts(ctx, backupName, accessPath, version, k, replicatedUserDirectories, dropExists); err != nil { + return err + } + if err = b.restoreBackupRelatedDir(backupName, "access", accessPath, disks, []string{"*.jsonl"}); err == nil { markFile := path.Join(accessPath, "need_rebuild_lists.mark") log.Infof("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) @@ -405,13 +421,269 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c if err != nil && os.IsNotExist(err) { return nil } - if err = b.restoreRBACReplicated(ctx, backupName, "access"); err != nil && !os.IsNotExist(err) { + if err = b.restoreRBACReplicated(backupName, "access", k, replicatedUserDirectories); err != nil && !os.IsNotExist(err) { return err } return nil } -func (b *Backuper) restoreRBACReplicated(ctx context.Context, backupName string, backupPrefixDir string) error { +func (b *Backuper) restoreRBACResolveAllConflicts(ctx context.Context, backupName string, accessPath string, version int, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory, dropExists bool) error { + backupAccessPath := path.Join(b.DefaultDataPath, "backup", backupName, "access") + + walkErr := filepath.Walk(backupAccessPath, func(fPath string, fInfo fs.FileInfo, err error) error { + if err != nil { + return err + } + if fInfo.IsDir() { + return nil + } + if strings.HasSuffix(fPath, ".sql") { + sql, readErr := os.ReadFile(fPath) + if readErr != nil { + return readErr + } + if resolveErr := 
b.resolveRBACConflictIfExist(ctx, string(sql), accessPath, version, k, replicatedUserDirectories, dropExists); resolveErr != nil { + return resolveErr + } + b.log.Debugf("%s b.resolveRBACConflictIfExist(%s) no error", fPath, string(sql)) + } + if strings.HasSuffix(fPath, ".jsonl") { + file, openErr := os.Open(fPath) + if openErr != nil { + return openErr + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + data := keeper.DumpNode{} + jsonErr := json.Unmarshal([]byte(line), &data) + if jsonErr != nil { + b.log.Errorf("can't %s json.Unmarshal error: %v line: %s", fPath, line, jsonErr) + continue + } + if strings.HasPrefix(data.Path, "uuid/") { + if resolveErr := b.resolveRBACConflictIfExist(ctx, data.Value, accessPath, version, k, replicatedUserDirectories, dropExists); resolveErr != nil { + return resolveErr + } + b.log.Debugf("%s:%s b.resolveRBACConflictIfExist(%s) no error", fPath, data.Path, data.Value) + } + + } + if scanErr := scanner.Err(); scanErr != nil { + return scanErr + } + + if closeErr := file.Close(); closeErr != nil { + b.log.Warnf("can't close %s error: %v", fPath, closeErr) + } + + } + return nil + }) + if !os.IsNotExist(walkErr) { + return walkErr + } + return nil +} + +func (b *Backuper) resolveRBACConflictIfExist(ctx context.Context, sql string, accessPath string, version int, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory, dropExists bool) error { + kind, name, detectErr := b.detectRBACObject(sql) + if detectErr != nil { + return detectErr + } + if isExists, existsRBACType, existsRBACObjectId := b.isRBACExists(ctx, kind, name, accessPath, version, k, replicatedUserDirectories); isExists { + b.log.Warnf("RBAC object kind=%s, name=%s already present, will %s", kind, name, b.cfg.General.RBACConflictResolution) + if b.cfg.General.RBACConflictResolution == "recreate" || dropExists { + if dropErr := b.dropExistsRBAC(ctx, kind, name, accessPath, existsRBACType, existsRBACObjectId, k); dropErr != nil { + return dropErr + } + return nil + } + if b.cfg.General.RBACConflictResolution == "fail" { + return fmt.Errorf("RBAC object kind=%s, name=%s already present, change ", kind, name) + } + } + return nil +} + +func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, accessPath string, version int, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory) (bool, string, string) { + //search in sql system.users, system.quotas, system.row_policies, system.roles, system.settings_profiles + if version > 200005000 { + var rbacSystemTableNames = map[string]string{ + "ROLE": "roles", + "ROW POLICY": "row_policies", + "SETTINGS PROFILE": "settings_profiles", + "QUOTA": "quotes", + "USER": "users", + } + systemTable, systemTableExists := rbacSystemTableNames[kind] + if !systemTableExists { + b.log.Errorf("unsupported RBAC object kind: %s", kind) + return false, "", "" + } + isRBACExistsSQL := fmt.Sprintf("SELECT id, name FROM `system`.`%s` WHERE name=? 
LIMIT 1", systemTable) + existsRBACRow := make([]clickhouse.RBACObject, 0) + if err := b.ch.SelectSingleRow(ctx, &existsRBACRow, isRBACExistsSQL, name); err != nil { + b.log.Errorf("RBAC object resolve failed kind: %s, name: %s, error: %v", kind, name, err) + return false, "", "" + } + if len(existsRBACRow) == 0 { + return false, "", "" + } + return true, "sql", existsRBACRow[0].Id + } + + checkRBACExists := func(sql string) bool { + existsKind, existsName, detectErr := b.detectRBACObject(sql) + if detectErr != nil { + b.log.Warnf("isRBACExists error: %v", detectErr) + return false + } + if existsKind == kind && existsName == name { + return true + } + return false + } + + // search in local user directory + if sqlFiles, globErr := filepath.Glob(path.Join(accessPath, "*.sql")); globErr == nil { + for _, f := range sqlFiles { + sql, readErr := os.ReadFile(f) + if readErr != nil { + b.log.Warnf("read %s error: %v", f, readErr) + continue + } + if checkRBACExists(string(sql)) { + return true, "local", strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + } + } + } else { + b.log.Warnf("access/*.sql error: %v", globErr) + } + + //search in keeper replicated user directory + if k != nil && len(replicatedUserDirectories) > 0 { + for _, userDirectory := range replicatedUserDirectories { + replicatedAccessPath, getAccessErr := k.GetReplicatedAccessPath(userDirectory.Name) + if getAccessErr != nil { + b.log.Warnf("b.isRBACExists -> k.GetReplicatedAccessPath error: %v", getAccessErr) + continue + } + isExists := false + existsObjectId := "" + walkErr := k.Walk(replicatedAccessPath, "uuid", true, func(node keeper.DumpNode) (bool, error) { + if node.Value == "" { + return false, nil + } + if checkRBACExists(node.Value) { + isExists = true + existsObjectId = strings.TrimPrefix(node.Path, path.Join(replicatedAccessPath, "uuid")+"/") + return true, nil + } + return false, nil + }) + if walkErr != nil { + b.log.Warnf("b.isRBACExists -> k.Walk error: %v", walkErr) + continue + } + if isExists { + return true, userDirectory.Name, existsObjectId + } + } + } + return false, "", "" +} + +func (b *Backuper) dropExistsRBAC(ctx context.Context, kind string, name string, accessPath string, rbacType, rbacObjectId string, k *keeper.Keeper) error { + //sql + if rbacType == "sql" { + dropSQL := fmt.Sprintf("DROP %s IF EXISTS `%s`", kind, name) + return b.ch.QueryContext(ctx, dropSQL) + } + //local + if rbacType == "local" { + return os.Remove(path.Join(accessPath, rbacObjectId+".sql")) + } + //keeper + var keeperPrefixesRBAC = map[string]string{ + "ROLE": "R", + "ROW POLICY": "P", + "SETTINGS PROFILE": "S", + "QUOTA": "Q", + "USER": "U", + } + keeperRBACTypePrefix, isKeeperRBACTypePrefixExists := keeperPrefixesRBAC[kind] + if !isKeeperRBACTypePrefixExists { + return fmt.Errorf("unsupported RBAC kind: %s", kind) + } + prefix, err := k.GetReplicatedAccessPath(rbacType) + if err != nil { + return fmt.Errorf("b.dropExistsRBAC -> k.GetReplicatedAccessPath error: %v", err) + } + deletedNodes := []string{ + path.Join(prefix, "uuid", rbacObjectId), + } + walkErr := k.Walk(prefix, keeperRBACTypePrefix, true, func(node keeper.DumpNode) (bool, error) { + if node.Value == rbacObjectId { + deletedNodes = append(deletedNodes, node.Path) + } + return false, nil + }) + if walkErr != nil { + return fmt.Errorf("b.dropExistsRBAC -> k.Walk(%s/%s) error: %v", prefix, keeperRBACTypePrefix, walkErr) + } + + for _, nodePath := range deletedNodes { + if deleteErr := k.Delete(nodePath); deleteErr != nil { + return 
fmt.Errorf("b.dropExistsRBAC -> k.Delete(%s) error: %v", nodePath, deleteErr) + } + } + return nil +} + +func (b *Backuper) detectRBACObject(sql string) (string, string, error) { + var kind, name string + var detectErr error + + // Define the map of prefixes and their corresponding kinds. + prefixes := map[string]string{ + "ATTACH ROLE": "ROLE", + "ATTACH ROW POLICY": "ROW POLICY", + "ATTACH SETTINGS PROFILE": "SETTINGS PROFILE", + "ATTACH QUOTA": "QUOTA", + "ATTACH USER": "USER", + } + + // Iterate over the prefixes to find a match. + for prefix, k := range prefixes { + if strings.HasPrefix(sql, prefix) { + kind = k + // Extract the name from the SQL query. + name = strings.TrimSpace(strings.TrimPrefix(sql, prefix)) + break + } + } + + // If no match is found, return an error. + if kind == "" { + detectErr = fmt.Errorf("unable to detect RBAC object kind from SQL query: %s", sql) + return kind, name, detectErr + } + name = strings.TrimSpace(strings.SplitN(name, " ", 2)[0]) + name = strings.Trim(name, " `") + if name == "" { + detectErr = fmt.Errorf("unable to detect RBAC object name from SQL query: %s", sql) + return kind, name, detectErr + } + return kind, name, detectErr +} + +// @todo think about restore RBAC from replicated to local *.sql +func (b *Backuper) restoreRBACReplicated(backupName string, backupPrefixDir string, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory) error { + if k == nil || len(replicatedUserDirectories) == 0 { + return nil + } log := b.log.WithField("logger", "restoreRBACReplicated") srcBackupDir := path.Join(b.DefaultDataPath, "backup", backupName, backupPrefixDir) info, err := os.Stat(srcBackupDir) @@ -423,42 +695,32 @@ func (b *Backuper) restoreRBACReplicated(ctx context.Context, backupName string, if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } - replicatedRBAC := make([]struct { - Name string `ch:"name"` - }, 0) - if err = b.ch.SelectContext(ctx, &replicatedRBAC, "SELECT name FROM system.user_directories WHERE type='replicated'"); err == nil && len(replicatedRBAC) > 0 { - jsonLFiles, err := filepathx.Glob(path.Join(srcBackupDir, "*.jsonl")) - if err != nil { - return err + jsonLFiles, err := filepathx.Glob(path.Join(srcBackupDir, "*.jsonl")) + if err != nil { + return err + } + if len(jsonLFiles) == 0 { + return nil + } + restoreReplicatedRBACMap := make(map[string]string, len(jsonLFiles)) + for _, jsonLFile := range jsonLFiles { + for _, userDirectory := range replicatedUserDirectories { + if strings.HasSuffix(jsonLFile, userDirectory.Name+".jsonl") { + restoreReplicatedRBACMap[jsonLFile] = userDirectory.Name + } } - if len(jsonLFiles) == 0 { - return nil + if _, exists := restoreReplicatedRBACMap[jsonLFile]; !exists { + restoreReplicatedRBACMap[jsonLFile] = replicatedUserDirectories[0].Name } - k := keeper.Keeper{Log: b.log.WithField("logger", "keeper")} - if err = k.Connect(ctx, b.ch, b.cfg); err != nil { + } + for jsonLFile, userDirectoryName := range restoreReplicatedRBACMap { + replicatedAccessPath, err := k.GetReplicatedAccessPath(userDirectoryName) + if err != nil { return err } - defer k.Close() - restoreReplicatedRBACMap := make(map[string]string, len(jsonLFiles)) - for _, jsonLFile := range jsonLFiles { - for _, userDirectory := range replicatedRBAC { - if strings.HasSuffix(jsonLFile, userDirectory.Name+".jsonl") { - restoreReplicatedRBACMap[jsonLFile] = userDirectory.Name - } - } - if _, exists := restoreReplicatedRBACMap[jsonLFile]; !exists { - restoreReplicatedRBACMap[jsonLFile] = 
replicatedRBAC[0].Name - } - } - for jsonLFile, userDirectoryName := range restoreReplicatedRBACMap { - replicatedAccessPath, err := k.GetReplicatedAccessPath(userDirectoryName) - if err != nil { - return err - } - log.Infof("keeper.Restore(%s) -> %s", jsonLFile, replicatedAccessPath) - if err := k.Restore(jsonLFile, replicatedAccessPath); err != nil { - return err - } + log.Infof("keeper.Restore(%s) -> %s", jsonLFile, replicatedAccessPath) + if err := k.Restore(jsonLFile, replicatedAccessPath); err != nil { + return err } } return nil diff --git a/pkg/backup/restore_remote.go b/pkg/backup/restore_remote.go index b2eabf8d..f316ffa0 100644 --- a/pkg/backup/restore_remote.go +++ b/pkg/backup/restore_remote.go @@ -2,12 +2,12 @@ package backup import "errors" -func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, commandId int) error { +func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, commandId int) error { if err := b.Download(backupName, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { // https://github.com/Altinity/clickhouse-backup/issues/625 if !errors.Is(err, ErrBackupIsAlreadyExists) { return err } } - return b.Restore(backupName, tablePattern, databaseMapping, partitions, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, commandId) + return b.Restore(backupName, tablePattern, databaseMapping, partitions, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, commandId) } diff --git a/pkg/backup/restore_test.go b/pkg/backup/restore_test.go new file mode 100644 index 00000000..18542082 --- /dev/null +++ b/pkg/backup/restore_test.go @@ -0,0 +1,85 @@ +package backup + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDetectRBACObject(t *testing.T) { + b := &Backuper{} // Create an instance of Backuper for testing + + testCases := []struct { + inputSQL string + expectedKind string + expectedName string + expectedErr error + }{ + { + inputSQL: "ATTACH ROLE `admin`", + expectedKind: "ROLE", + expectedName: "admin", + expectedErr: nil, + }, + { + inputSQL: "ATTACH USER `user1` WITH PASSWORD 'password'", + expectedKind: "USER", + expectedName: "user1", + expectedErr: nil, + }, + { + inputSQL: "ATTACH ROLE test_rbac SETTINGS PROFILE ID('4949fb42-97bb-4841-4b5b-c05d4b0cb685');\n", + expectedKind: "ROLE", + expectedName: "test_rbac", + expectedErr: nil, + }, + { + inputSQL: "ATTACH ROW POLICY test_rbac ON default.test_rbac AS restrictive FOR SELECT USING 1 = 1 TO ID('e1469fb8-e014-c22b-4e5c-406134320f91');\n", + expectedKind: "ROW POLICY", + expectedName: "test_rbac", + expectedErr: nil, + }, + { + inputSQL: "ATTACH SETTINGS PROFILE `test_rbac` SETTINGS max_execution_time = 60.;\n", + expectedKind: "SETTINGS PROFILE", + expectedName: "test_rbac", + expectedErr: nil, + }, + { + inputSQL: "ATTACH QUOTA test_rbac KEYED BY user_name TO ID('e1469fb8-e014-c22b-4e5c-406134320f91');\n", + expectedKind: "QUOTA", + expectedName: "test_rbac", + expectedErr: nil, + }, + { + inputSQL: "ATTACH USER test_rbac IDENTIFIED WITH sha256_hash BY '256A6D6B157C014A70BE5C62ACA0FE4A6183BFBD45895F62287447B55E519BAD' 
DEFAULT ROLE ID('2d449952-fca4-c9f2-2949-b83880124bbc');\nATTACH GRANT ID('2d449952-fca4-c9f2-2949-b83880124bbc') TO test_rbac;\n", + expectedKind: "USER", + expectedName: "test_rbac", + expectedErr: nil, + }, + { + inputSQL: "INVALID SQL", + expectedKind: "", + expectedName: "", + expectedErr: fmt.Errorf("unable to detect RBAC object kind from SQL query: INVALID SQL"), + }, + { + inputSQL: "ATTACH USER ``", + expectedKind: "USER", + expectedName: "", + expectedErr: fmt.Errorf("unable to detect RBAC object name from SQL query: ATTACH USER ``"), + }, + } + + for _, tc := range testCases { + kind, name, err := b.detectRBACObject(tc.inputSQL) + assert.Equal(t, tc.expectedKind, kind) + assert.Equal(t, tc.expectedName, name) + if tc.expectedName != "" && tc.expectedKind != "" { + assert.NoError(t, err) + } + if err != nil { + assert.Equal(t, tc.expectedErr.Error(), err.Error()) + } + } +} diff --git a/pkg/clickhouse/structs.go b/pkg/clickhouse/structs.go index 49d306eb..c634b174 100644 --- a/pkg/clickhouse/structs.go +++ b/pkg/clickhouse/structs.go @@ -92,3 +92,12 @@ type ColumnDataTypes struct { type BackupDataSize struct { Size uint64 `ch:"backup_data_size"` } + +type UserDirectory struct { + Name string `ch:"name"` +} + +type RBACObject struct { + Id string `ch:"id"` + Name string `ch:"name"` +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 1119b6f8..0e4a9088 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -60,6 +60,8 @@ type GeneralConfig struct { ShardedOperationMode string `yaml:"sharded_operation_mode" envconfig:"SHARDED_OPERATION_MODE"` CPUNicePriority int `yaml:"cpu_nice_priority" envconfig:"CPU_NICE_PRIORITY"` IONicePriority string `yaml:"io_nice_priority" envconfig:"IO_NICE_PRIORITY"` + RBACBackupAlways bool `yaml:"rbac_backup_always" envconfig:"RBAC_BACKUP_ALWAYS"` + RBACConflictResolution string `yaml:"rbac_conflict_resolution" envconfig:"RBAC_CONFLICT_RESOLUTION"` RetriesDuration time.Duration WatchDuration time.Duration FullDuration time.Duration @@ -528,6 +530,8 @@ func DefaultConfig() *Config { RestoreDatabaseMapping: make(map[string]string, 0), IONicePriority: "idle", CPUNicePriority: 15, + RBACBackupAlways: true, + RBACConflictResolution: "recreate", }, ClickHouse: ClickHouseConfig{ Username: "default", diff --git a/pkg/keeper/keeper.go b/pkg/keeper/keeper.go index 6a7229bf..82a84b0a 100644 --- a/pkg/keeper/keeper.go +++ b/pkg/keeper/keeper.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/antchfx/xmlquery" "github.com/apex/log" "os" @@ -37,7 +36,7 @@ func (KeeperLogToApexLogAdapter LogKeeperToApexLogAdapter) Printf(msg string, ar } } -type keeperDumpNode struct { +type DumpNode struct { Path string `json:"path"` Value string `json:"value"` } @@ -51,7 +50,7 @@ type Keeper struct { } // Connect - connect to any zookeeper server from /var/lib/clickhouse/preprocessed_configs/config.xml -func (k *Keeper) Connect(ctx context.Context, ch *clickhouse.ClickHouse, cfg *config.Config) error { +func (k *Keeper) Connect(ctx context.Context, ch *clickhouse.ClickHouse) error { configFile, doc, err := ch.ParseXML(ctx, "config.xml") if err != nil { return fmt.Errorf("can't parse config.xml from %s, error: %v", configFile, err) @@ -137,15 +136,15 @@ func (k *Keeper) dumpNodeRecursive(prefix, nodePath string, f *os.File) (int, er if err != nil { return 0, err } - bytes, err := k.writeJsonString(f, keeperDumpNode{Path: nodePath, Value: string(value)}) + bytes, err := 
k.writeJsonString(f, DumpNode{Path: nodePath, Value: string(value)}) if err != nil { return 0, err } - childs, _, err := k.conn.Children(path.Join(prefix, nodePath)) + children, _, err := k.conn.Children(path.Join(prefix, nodePath)) if err != nil { return 0, err } - for _, childPath := range childs { + for _, childPath := range children { if childBytes, err := k.dumpNodeRecursive(prefix, path.Join(nodePath, childPath), f); err != nil { return 0, err } else { @@ -155,7 +154,7 @@ func (k *Keeper) dumpNodeRecursive(prefix, nodePath string, f *os.File) (int, er return bytes, nil } -func (k *Keeper) writeJsonString(f *os.File, node keeperDumpNode) (int, error) { +func (k *Keeper) writeJsonString(f *os.File, node DumpNode) (int, error) { jsonLine, err := json.Marshal(node) if err != nil { return 0, err @@ -183,7 +182,7 @@ func (k *Keeper) Restore(dumpFile, prefix string) error { } scanner := bufio.NewScanner(f) for scanner.Scan() { - node := keeperDumpNode{} + node := DumpNode{} if err = json.Unmarshal(scanner.Bytes(), &node); err != nil { return err } @@ -207,6 +206,39 @@ func (k *Keeper) Restore(dumpFile, prefix string) error { return nil } +type WalkCallBack = func(node DumpNode) (bool, error) + +func (k *Keeper) Walk(prefix, relativePath string, recursive bool, callback WalkCallBack) error { + nodePath := path.Join(prefix, relativePath) + value, stat, err := k.conn.Get(nodePath) + k.Log.Debugf("Walk->get(%s) = %v, err = %v", nodePath, string(value), err) + if err != nil { + return err + } + var isDone bool + if isDone, err = callback(DumpNode{Path: nodePath, Value: string(value)}); err != nil { + return err + } + if isDone { + return nil + } + if recursive && stat.NumChildren > 0 { + children, _, err := k.conn.Children(path.Join(prefix, relativePath)) + if err != nil { + return err + } + for _, childPath := range children { + if childErr := k.Walk(prefix, path.Join(relativePath, childPath), recursive, callback); childErr != nil { + return childErr + } + } + } + return nil +} + +func (k *Keeper) Delete(nodePath string) error { + return k.conn.Delete(nodePath, -1) +} func (k *Keeper) Close() { k.conn.Close() } diff --git a/pkg/server/server.go b/pkg/server/server.go index 4b4b3174..7445c45c 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1163,7 +1163,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) partitionsToBackup := make([]string, 0) schemaOnly := false dataOnly := false - dropTable := false + dropExists := false ignoreDependencies := false restoreRBAC := false restoreConfigs := false @@ -1202,11 +1202,11 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) fullCommand += " --data" } if _, exist := query["drop"]; exist { - dropTable = true + dropExists = true fullCommand += " --drop" } if _, exist := query["rm"]; exist { - dropTable = true + dropExists = true fullCommand += " --rm" } if _, exists := query["ignore_dependencies"]; exists { @@ -1236,7 +1236,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) go func() { err, _ := api.metrics.ExecuteWithMetrics("restore", 0, func() error { b := backup.NewBackuper(api.config) - return b.Restore(name, tablePattern, databaseMappingToRestore, partitionsToBackup, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, false, restoreConfigs, false, commandId) + return b.Restore(name, tablePattern, databaseMappingToRestore, partitionsToBackup, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, false, 
restoreConfigs, false, commandId) }) status.Current.Stop(commandId, err) if err != nil { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index e08757e5..ac4c83e5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -460,41 +460,48 @@ func TestRBAC(t *testing.T) { ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - - ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") ch.queryWithNoError(r, "DROP QUOTA IF EXISTS test_rbac") ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS test_rbac ON default.test_rbac") ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") - log.Info("create RBAC related objects") - ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") - ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") - ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") - ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") - ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") + creatAllRBAC := func(drop bool) { + if drop { + log.Info("drop all RBAC related objects after backup") + ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") + ch.queryWithNoError(r, "DROP QUOTA test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE test_rbac") + ch.queryWithNoError(r, "DROP USER test_rbac") + } + log.Info("create RBAC related objects") + ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") + ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") + ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") + ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") + ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") + } + creatAllRBAC(false) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "test_rbac_backup")) r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - log.Info("drop all RBAC related objects after backup") - ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") - ch.queryWithNoError(r, "DROP QUOTA test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE test_rbac") - ch.queryWithNoError(r, "DROP USER test_rbac") + log.Info("create conflicted RBAC objects") + creatAllRBAC(true) log.Info("download+restore RBAC") - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup 
download test_rbac_backup")) out, err := dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") + log.Debug(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) out, err = dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac-only test_rbac_backup") + log.Debug(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) From 93dab36e2cd342fddd041fa16920692b35db3120 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 3 Apr 2024 23:03:53 +0400 Subject: [PATCH 59/80] checked `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` - will never upload object disk content, fix https://github.com/Altinity/clickhouse-backup/issues/799 --- ChangeLog.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ChangeLog.md b/ChangeLog.md index 04b788a8..da52f065 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -22,6 +22,7 @@ BUG FIXES - fix wrong behavior for `freeze_by_part` + `freeze_by_part_where`, fix [855](https://github.com/Altinity/clickhouse-backup/issues/855) - apply `CLICKHOUSE_SKIP_TABLES_ENGINES` during `create` command - fixed behavior for upload / download when .inner. table missing for MATERIALIZED VIEW by table pattern, fix [765](https://github.com/Altinity/clickhouse-backup/issues/765) +- fixed `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` - shall skip upload object disk content, fix [799](https://github.com/Altinity/clickhouse-backup/issues/799) # v2.4.35 IMPROVEMENTS From da96a299e3c9545d823f2a9ca862f954e9c9f79f Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 3 Apr 2024 23:05:03 +0400 Subject: [PATCH 60/80] modified `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix https://github.com/Altinity/clickhouse-backup/issues/851, change approach to SQL based --- pkg/backup/restore.go | 25 +++++++++++++++++-------- pkg/backup/restore_test.go | 8 ++++---- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 796be57d..eaab4c7f 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -509,12 +509,12 @@ func (b *Backuper) resolveRBACConflictIfExist(ctx context.Context, sql string, a func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, accessPath string, version int, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory) (bool, string, string) { //search in sql system.users, system.quotas, system.row_policies, system.roles, system.settings_profiles - if version > 200005000 { + if version > 20005000 { var rbacSystemTableNames = map[string]string{ "ROLE": "roles", "ROW POLICY": "row_policies", "SETTINGS PROFILE": "settings_profiles", - "QUOTA": "quotes", + "QUOTA": "quotas", "USER": "users", } systemTable, systemTableExists := rbacSystemTableNames[kind] @@ -522,10 +522,10 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a b.log.Errorf("unsupported RBAC object kind: %s", kind) return false, "", "" } - isRBACExistsSQL := fmt.Sprintf("SELECT id, name FROM `system`.`%s` WHERE name=? LIMIT 1", systemTable) + isRBACExistsSQL := fmt.Sprintf("SELECT toString(id) AS id, name FROM `system`.`%s` WHERE name=? 
LIMIT 1", systemTable) existsRBACRow := make([]clickhouse.RBACObject, 0) - if err := b.ch.SelectSingleRow(ctx, &existsRBACRow, isRBACExistsSQL, name); err != nil { - b.log.Errorf("RBAC object resolve failed kind: %s, name: %s, error: %v", kind, name, err) + if err := b.ch.SelectContext(ctx, &existsRBACRow, isRBACExistsSQL, name); err != nil { + b.log.Fatalf("RBAC object resolve failed kind: %s, name: %s, error: %v", kind, name, err) return false, "", "" } if len(existsRBACRow) == 0 { @@ -598,7 +598,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a func (b *Backuper) dropExistsRBAC(ctx context.Context, kind string, name string, accessPath string, rbacType, rbacObjectId string, k *keeper.Keeper) error { //sql if rbacType == "sql" { - dropSQL := fmt.Sprintf("DROP %s IF EXISTS `%s`", kind, name) + dropSQL := fmt.Sprintf("DROP %s IF EXISTS %s", kind, name) return b.ch.QueryContext(ctx, dropSQL) } //local @@ -670,8 +670,17 @@ func (b *Backuper) detectRBACObject(sql string) (string, string, error) { detectErr = fmt.Errorf("unable to detect RBAC object kind from SQL query: %s", sql) return kind, name, detectErr } - name = strings.TrimSpace(strings.SplitN(name, " ", 2)[0]) - name = strings.Trim(name, " `") + names := strings.SplitN(name, " ", 2) + if len(names) > 1 && strings.HasPrefix(names[1], "ON ") { + names = strings.SplitN(name, " ", 4) + name = strings.Join(names[0:3], " ") + } else { + name = names[0] + } + if kind != "ROW POLICY" { + name = strings.Trim(name, "`") + } + name = strings.TrimSpace(name) if name == "" { detectErr = fmt.Errorf("unable to detect RBAC object name from SQL query: %s", sql) return kind, name, detectErr diff --git a/pkg/backup/restore_test.go b/pkg/backup/restore_test.go index 18542082..00400b5f 100644 --- a/pkg/backup/restore_test.go +++ b/pkg/backup/restore_test.go @@ -34,9 +34,9 @@ func TestDetectRBACObject(t *testing.T) { expectedErr: nil, }, { - inputSQL: "ATTACH ROW POLICY test_rbac ON default.test_rbac AS restrictive FOR SELECT USING 1 = 1 TO ID('e1469fb8-e014-c22b-4e5c-406134320f91');\n", + inputSQL: "ATTACH ROW POLICY `test_rbac` ON default.test_rbac AS restrictive FOR SELECT USING 1 = 1 TO ID('e1469fb8-e014-c22b-4e5c-406134320f91');\n", expectedKind: "ROW POLICY", - expectedName: "test_rbac", + expectedName: "`test_rbac` ON default.test_rbac", expectedErr: nil, }, { @@ -64,10 +64,10 @@ func TestDetectRBACObject(t *testing.T) { expectedErr: fmt.Errorf("unable to detect RBAC object kind from SQL query: INVALID SQL"), }, { - inputSQL: "ATTACH USER ``", + inputSQL: "ATTACH USER ", expectedKind: "USER", expectedName: "", - expectedErr: fmt.Errorf("unable to detect RBAC object name from SQL query: ATTACH USER ``"), + expectedErr: fmt.Errorf("unable to detect RBAC object name from SQL query: ATTACH USER "), }, } From 8cfb2c60d7cb30d5da298b204fce5d1f681d5bbd Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 4 Apr 2024 06:40:22 +0400 Subject: [PATCH 61/80] fix failures for RBACBackupAlways --- pkg/backup/upload.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 8d9ee267..6b1489e7 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -444,7 +444,11 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate var localFiles []string var err error if localFiles, err = filepathx.Glob(localFilesGlobPattern); err != nil || localFiles == nil || len(localFiles) == 0 { - return 0, fmt.Errorf("list %s return list=%v with err=%v", 
localFilesGlobPattern, localFiles, err) + if !b.cfg.General.RBACBackupAlways { + return 0, fmt.Errorf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err) + } + b.log.Warnf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err) + return 0, nil } for i := 0; i < len(localFiles); i++ { From 4c6e822f3d751a5144c96df72386541ba6e540b2 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 4 Apr 2024 06:49:13 +0400 Subject: [PATCH 62/80] setup golang-1.22 properly --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 90630ccc..9f047802 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -123,15 +123,15 @@ jobs: - '23.8' - '24.3' steps: + - name: Checkout project + uses: actions/checkout@v4 + - name: Setup golang id: setup-go uses: actions/setup-go@v5 with: go-version: '^${{ matrix.golang-version }}' - - name: Checkout project - uses: actions/checkout@v4 - - uses: actions/download-artifact@v4 with: name: build-test-artifacts From 3b0308583053907ea3162b84d44094c37373dcab Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 4 Apr 2024 18:43:35 +0400 Subject: [PATCH 63/80] fix TestRBAC remove SQL-based dropRBACExists approach for clickhouse-server prior 22.3 --- pkg/backup/restore.go | 8 ++++++-- test/integration/integration_test.go | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index eaab4c7f..7d0e8f33 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -509,7 +509,7 @@ func (b *Backuper) resolveRBACConflictIfExist(ctx context.Context, sql string, a func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, accessPath string, version int, k *keeper.Keeper, replicatedUserDirectories []clickhouse.UserDirectory) (bool, string, string) { //search in sql system.users, system.quotas, system.row_policies, system.roles, system.settings_profiles - if version > 20005000 { + if version > 22003000 { var rbacSystemTableNames = map[string]string{ "ROLE": "roles", "ROW POLICY": "row_policies", @@ -752,7 +752,11 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat log.Warnf("stat: %s error: %v", srcBackupDir, err) return err } - + existsFiles, _ := os.ReadDir(destinationDir) + for _, existsF := range existsFiles { + existsI, _ := existsF.Info() + log.Debugf("%s %v %v", path.Join(destinationDir, existsF.Name()), existsI.Size(), existsI.ModTime()) + } if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index ac4c83e5..6ccf349d 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -492,6 +492,8 @@ func TestRBAC(t *testing.T) { log.Info("create conflicted RBAC objects") creatAllRBAC(true) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + log.Info("download+restore RBAC") r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup")) From 95b0aec55f58c29744a0cb10863040f7eb0b986d Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 08:09:20 +0400 Subject: [PATCH 64/80] fix TestServerAPI --- pkg/server/server.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/server/server.go b/pkg/server/server.go 
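For context on the server.go hunk below: the backup list API and the last-backup-size metrics previously summed only data and metadata sizes; this fix routes both through a single "full size" helper that also counts config and RBAC sizes. A sketch of that helper as it is introduced in pkg/storage/general.go later in this series (only the size fields needed for the sketch are shown):

package storage

// Backup mirrors the size-related fields of storage.Backup referenced below.
type Backup struct {
	DataSize     uint64
	MetadataSize uint64
	ConfigSize   uint64
	RBACSize     uint64
}

// GetFullSize returns the complete remote backup size, table data plus all
// metadata components, so the API list handler and the last-backup-size
// metrics report the same number.
func (b *Backup) GetFullSize() uint64 {
	return b.DataSize + b.MetadataSize + b.ConfigSize + b.RBACSize
}
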
index 7445c45c..6708d4e6 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -782,16 +782,17 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { } description += b.Tags } + fullSize := b.GetFullSize() backupsJSON = append(backupsJSON, backupJSON{ Name: b.BackupName, Created: b.CreationDate.Format(common.TimeFormat), - Size: b.DataSize + b.MetadataSize, + Size: fullSize, Location: "remote", RequiredBackup: b.RequiredBackup, Desc: description, }) if i == len(remoteBackups)-1 { - api.metrics.LastBackupSizeRemote.Set(float64(b.DataSize + b.MetadataSize + b.ConfigSize + b.RBACSize)) + api.metrics.LastBackupSizeRemote.Set(float64(fullSize)) } } api.metrics.NumberBackupsRemoteBroken.Set(float64(brokenBackups)) @@ -1432,7 +1433,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e } } lastBackup := remoteBackups[numberBackupsRemote-1] - lastSizeRemote = lastBackup.DataSize + lastBackup.MetadataSize + lastBackup.ConfigSize + lastBackup.RBACSize + lastSizeRemote = lastBackup.GetFullSize() lastBackupCreateRemote = &lastBackup.CreationDate lastBackupUpload = &lastBackup.UploadDate api.metrics.LastBackupSizeRemote.Set(float64(lastSizeRemote)) From 578e576c53fc69667545c39169d38544e6458212 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 08:12:07 +0400 Subject: [PATCH 65/80] fix TestServerAPI --- pkg/storage/general.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/storage/general.go b/pkg/storage/general.go index d37163ac..505c88ca 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -47,6 +47,10 @@ type Backup struct { UploadDate time.Time `json:"upload_date"` } +func (b *Backup) GetFullSize() uint64 { + return b.DataSize + b.MetadataSize + b.ConfigSize + b.RBACSize +} + type BackupDestination struct { RemoteStorage Log *apexLog.Entry From a51f892c015d37bcbb6cad4692a54018889b9d70 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 09:37:11 +0400 Subject: [PATCH 66/80] fix testflows --- .../clickhouse_backup/tests/snapshots/cli.py.cli.snapshot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index 4d15c6c0..8ddf1f34 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' 
freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 0\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" +default_config = r"""'[\'general:\', \' remote_storage: none\', 
\' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' rbac_backup_always: true\', \' rbac_conflict_resolution: recreate\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 0\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', 
\' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --environment-override value, --env value override any environment variable via CLI parameter\n --help, -h show help\n --version, -v print the version'""" From 1cfc3a18301fcf368f011bbd22ad897adb0c336c Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 10:33:09 +0400 Subject: [PATCH 67/80] fix TestIntegrationEmbedded fail --- pkg/backup/create.go | 15 +++++++++++---- pkg/backup/delete.go | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pkg/backup/create.go b/pkg/backup/create.go index b54608d8..31e5c6d6 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -126,8 +126,10 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, } partitionsIdMap, partitionsNameList := partition.ConvertPartitionsToIdsMapAndNamesList(ctx, b.ch, tables, nil, partitions) doBackupData := !schemaOnly && !rbacOnly && !configsOnly - backupRBACSize, backupConfigSize := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) - + backupRBACSize, backupConfigSize, rbacAndConfigsErr := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) + if rbacAndConfigsErr != nil { + return rbacAndConfigsErr + } if b.cfg.ClickHouse.UseEmbeddedBackupRestore { err = b.createBackupEmbedded(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, version, tablePattern, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup) } else { @@ -154,7 +156,7 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, return nil } -func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupName string, createRBAC bool, rbacOnly bool, createConfigs bool, configsOnly bool, disks []clickhouse.Disk, diskMap map[string]string, log *apexLog.Entry) (uint64, uint64) { +func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupName string, createRBAC bool, rbacOnly bool, createConfigs bool, configsOnly bool, disks []clickhouse.Disk, diskMap map[string]string, log *apexLog.Entry) (uint64, uint64, error) { backupRBACSize, backupConfigSize := uint64(0), uint64(0) backupPath := path.Join(b.DefaultDataPath, "backup") if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { @@ -177,7 +179,12 @@ func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupNa log.WithField("size", 
utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs") } } - return backupRBACSize, backupConfigSize + if backupRBACSize > 0 || backupConfigSize > 0 { + if chownErr := filesystemhelper.Chown(backupPath, b.ch, disks, true); chownErr != nil { + return backupRBACSize, backupConfigSize, chownErr + } + } + return backupRBACSize, backupConfigSize, nil } func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time) error { diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index b2fcfcee..7493285d 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -223,7 +223,7 @@ func (b *Backuper) cleanLocalEmbedded(ctx context.Context, backup LocalBackup, d if err != nil { return err } - if !info.IsDir() && !strings.HasSuffix(filePath, ".json") { + if !info.IsDir() && !strings.HasSuffix(filePath, ".json") && !strings.HasPrefix(filePath, path.Join(backupPath, "access")) { apexLog.Debugf("object_disk.ReadMetadataFromFile(%s)", filePath) meta, err := object_disk.ReadMetadataFromFile(filePath) if err != nil { From c01d867336d1d8c9e71205a8411d520d6e4097f8 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 14:04:15 +0400 Subject: [PATCH 68/80] fix TestIntegrationEmbedded fail --- ChangeLog.md | 4 +- ReadMe.md | 7 +- pkg/backup/download.go | 22 +++--- pkg/backup/upload.go | 8 +- pkg/config/config.go | 56 +++++++------- pkg/progressbar/progressbar.go | 57 -------------- pkg/storage/general.go | 128 +++++++++++++------------------ test/integration/config-ftp.yaml | 2 + 8 files changed, 105 insertions(+), 179 deletions(-) delete mode 100644 pkg/progressbar/progressbar.go diff --git a/ChangeLog.md b/ChangeLog.md index da52f065..da52246a 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -10,7 +10,9 @@ IMPROVEMENTS - added `GCS_CHUNK_SIZE` config parameter, try to speedup GCS upload fix [874](https://github.com/Altinity/clickhouse-backup/pull/874), thanks @dermasmid - added `--remote-backup` cli parameter to `tables` command and `GET /backup/table`, fix [778](https://github.com/Altinity/clickhouse-backup/issues/778) - added `rbac_always_backup: true` option to default config, will create backup for RBAC objects automatically, restore still require `--rbac` to avoid destructive actions, fix [793](https://github.com/Altinity/clickhouse-backup/issues/793) -- added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix [851](https://github.com/Altinity/clickhouse-backup/issues/851) +- added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix [851](https://github.com/Altinity/clickhouse-backup/issues/851) +- added `upload_max_bytes_per_seconds` and `download_max_bytes_per_seconds` config options to allow throttling without CAP_SYS_NICE, fix [817](https://github.com/Altinity/clickhouse-backup/issues/817) +- removed `disable_progress_bar` config option and related progress bar code - switched to golang 1.22 - added `clickhouse/clickhouse-server:24.3` to CI/CD diff --git a/ReadMe.md b/ReadMe.md index 90611616..f20dfaaa 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -379,7 +379,6 @@ Use 
`clickhouse-backup print-config` to print current config. general: remote_storage: none # REMOTE_STORAGE, choice from: `azblob`,`gcs`,`s3`, etc; if `none` then `upload` and `download` commands will fail. max_file_size: 1073741824 # MAX_FILE_SIZE, 1G by default, useless when upload_by_part is true, use to split data parts files by archives - disable_progress_bar: true # DISABLE_PROGRESS_BAR, show progress bar during upload and download, makes sense only when `upload_concurrency` and `download_concurrency` is 1 backups_to_keep_local: 0 # BACKUPS_TO_KEEP_LOCAL, how many latest local backup should be kept, 0 means all created backups will be stored on local disk # -1 means backup will keep after `create` but will delete after `create_remote` command # You can run `clickhouse-backup delete local ` command to remove temporary backup files from the local disk @@ -391,7 +390,11 @@ general: # For example, 4 means max 4 parallel tables and 4 parallel parts inside one table, so equals 16 concurrent streams download_concurrency: 1 # DOWNLOAD_CONCURRENCY, max 255, by default, the value is round(sqrt(AVAILABLE_CPU_CORES / 2)) upload_concurrency: 1 # UPLOAD_CONCURRENCY, max 255, by default, the value is round(sqrt(AVAILABLE_CPU_CORES / 2)) - + + # Throttling speed for upload and download, calculates on part level, not the socket level, it means short period for high traffic values and then time to sleep + download_max_bytes_per_second: 0 # DOWNLOAD_MAX_BYTES_PER_SECOND, 0 means no throttling + upload_max_bytes_per_second: 0 # UPLOAD_MAX_BYTES_PER_SECOND, 0 means no throttling + # RESTORE_SCHEMA_ON_CLUSTER, execute all schema related SQL queries with `ON CLUSTER` clause as Distributed DDL. # Check `system.clusters` table for the correct cluster name, also `system.macros` can be used. # This isn't applicable when `use_embedded_backup_restore: true` diff --git a/pkg/backup/download.go b/pkg/backup/download.go index babba1b0..c60d3f89 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -58,7 +58,7 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err = retry.RunCtx(ctx, func(ctx context.Context) error { - return bd.DownloadCompressedStream(ctx, backupName, path.Join(b.DefaultDataPath, "backup", backupName)) + return bd.DownloadCompressedStream(ctx, backupName, path.Join(b.DefaultDataPath, "backup", backupName), b.cfg.General.DownloadMaxBytesPerSecond) }) if err != nil { return err @@ -230,7 +230,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ idx := i dataGroup.Go(func() error { start := time.Now() - if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, *tableMetadataAfterDownload[idx], disks); err != nil { + if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, *tableMetadataAfterDownload[idx]); err != nil { return err } log. 
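
The two new throttle settings documented above are plain bytes-per-second counters and, like the rest of the `general` section, can be overridden through the environment. A minimal sketch of how such fields are bound with github.com/kelseyhightower/envconfig (already a dependency of this project); the struct, the empty prefix, and the main function are illustrative only, not the project's actual config loader:

```go
package main

import (
	"fmt"
	"log"

	"github.com/kelseyhightower/envconfig"
)

// throttleSettings mirrors only the two new general-section fields;
// the struct name and the empty prefix below are assumptions for this sketch.
type throttleSettings struct {
	UploadMaxBytesPerSecond   uint64 `envconfig:"UPLOAD_MAX_BYTES_PER_SECOND"`
	DownloadMaxBytesPerSecond uint64 `envconfig:"DOWNLOAD_MAX_BYTES_PER_SECOND"`
}

func main() {
	var s throttleSettings
	// envconfig.Process fills the struct from UPLOAD_MAX_BYTES_PER_SECOND
	// and DOWNLOAD_MAX_BYTES_PER_SECOND; leaving them unset keeps the
	// fields at 0, which means throttling stays disabled.
	if err := envconfig.Process("", &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("upload cap: %d B/s, download cap: %d B/s\n",
		s.UploadMaxBytesPerSecond, s.DownloadMaxBytesPerSecond)
}
```
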
@@ -589,7 +589,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } } if remoteBackup.DataFormat == DirectoryFormat { - if err := b.dst.DownloadPath(ctx, 0, remoteSource, localDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { + if err := b.dst.DownloadPath(ctx, remoteSource, localDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.DownloadMaxBytesPerSecond); err != nil { //SFTP can't walk on non exists paths and return error if !strings.Contains(err.Error(), "not exist") { return 0, err @@ -623,7 +623,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err = retry.RunCtx(ctx, func(ctx context.Context) error { - return b.dst.DownloadCompressedStream(ctx, remoteSource, localDir) + return b.dst.DownloadCompressedStream(ctx, remoteSource, localDir, b.cfg.General.DownloadMaxBytesPerSecond) }) if err != nil { return 0, err @@ -634,7 +634,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st return uint64(remoteFileInfo.Size()), nil } -func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, disks []clickhouse.Disk) error { +func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { log := b.log.WithField("logger", "downloadTableData") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) ctx, cancel := context.WithCancel(ctx) @@ -671,7 +671,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(dataCtx, func(dataCtx context.Context) error { - return b.dst.DownloadCompressedStream(dataCtx, tableRemoteFile, tableLocalDir) + return b.dst.DownloadCompressedStream(dataCtx, tableRemoteFile, tableLocalDir, b.cfg.General.DownloadMaxBytesPerSecond) }) if err != nil { return err @@ -715,7 +715,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume && b.resumableState.IsAlreadyProcessedBool(partRemotePath) { return nil } - if err := b.dst.DownloadPath(dataCtx, 0, partRemotePath, partLocalPath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { + if err := b.dst.DownloadPath(dataCtx, partRemotePath, partLocalPath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.DownloadMaxBytesPerSecond); err != nil { return err } if b.resume { @@ -732,7 +732,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } if !b.isEmbedded && remoteBackup.RequiredBackup != "" { - err := b.downloadDiffParts(ctx, remoteBackup, table, dbAndTableDir, disks) + err := b.downloadDiffParts(ctx, remoteBackup, table, dbAndTableDir) if err != nil { return err } @@ -741,7 +741,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
return nil } -func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string, disks []clickhouse.Disk) error { +func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { log := b.log.WithField("operation", "downloadDiffParts") log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("start") start := time.Now() @@ -869,7 +869,7 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo if path.Ext(tableRemoteFile) != "" { retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { - return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir) + return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.DownloadMaxBytesPerSecond) }) if err != nil { log.Warnf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) @@ -877,7 +877,7 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo } } else { // remoteFile could be a directory - if err := b.dst.DownloadPath(ctx, 0, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { + if err := b.dst.DownloadPath(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.DownloadMaxBytesPerSecond); err != nil { log.Warnf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 6b1489e7..183ea78d 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -461,7 +461,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate } if b.cfg.GetCompressionFormat() == "none" { remoteUploadedBytes := int64(0) - if remoteUploadedBytes, err = b.dst.UploadPath(ctx, 0, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { + if remoteUploadedBytes, err = b.dst.UploadPath(ctx, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.UploadMaxBytesPerSecond); err != nil { return 0, fmt.Errorf("can't RBAC or config upload %s: %v", destinationRemote, err) } if b.resume { @@ -471,7 +471,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err = retry.RunCtx(ctx, func(ctx context.Context) error { - return b.dst.UploadCompressedStream(ctx, localBackupRelatedDir, localFiles, destinationRemote) + return b.dst.UploadCompressedStream(ctx, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.UploadMaxBytesPerSecond) }) if err != nil { return 0, fmt.Errorf("can't RBAC or config upload compressed %s: %v", destinationRemote, err) @@ -542,7 +542,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet } } log.Debugf("start upload %d files to %s", len(partFiles), remotePath) - if uploadPathBytes, err := b.dst.UploadPath(ctx, 0, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { + if uploadPathBytes, err := 
b.dst.UploadPath(ctx, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.UploadMaxBytesPerSecond); err != nil { log.Errorf("UploadPath return error: %v", err) return fmt.Errorf("can't upload: %v", err) } else { @@ -576,7 +576,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet log.Debugf("start upload %d files to %s", len(localFiles), remoteDataFile) retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { - return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile) + return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile, b.cfg.General.UploadMaxBytesPerSecond) }) if err != nil { log.Errorf("UploadCompressedStream return error: %v", err) diff --git a/pkg/config/config.go b/pkg/config/config.go index 0e4a9088..89883654 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -38,33 +38,34 @@ type Config struct { // GeneralConfig - general setting section type GeneralConfig struct { - RemoteStorage string `yaml:"remote_storage" envconfig:"REMOTE_STORAGE"` - MaxFileSize int64 `yaml:"max_file_size" envconfig:"MAX_FILE_SIZE"` - DisableProgressBar bool `yaml:"disable_progress_bar" envconfig:"DISABLE_PROGRESS_BAR"` - BackupsToKeepLocal int `yaml:"backups_to_keep_local" envconfig:"BACKUPS_TO_KEEP_LOCAL"` - BackupsToKeepRemote int `yaml:"backups_to_keep_remote" envconfig:"BACKUPS_TO_KEEP_REMOTE"` - LogLevel string `yaml:"log_level" envconfig:"LOG_LEVEL"` - AllowEmptyBackups bool `yaml:"allow_empty_backups" envconfig:"ALLOW_EMPTY_BACKUPS"` - DownloadConcurrency uint8 `yaml:"download_concurrency" envconfig:"DOWNLOAD_CONCURRENCY"` - UploadConcurrency uint8 `yaml:"upload_concurrency" envconfig:"UPLOAD_CONCURRENCY"` - UseResumableState bool `yaml:"use_resumable_state" envconfig:"USE_RESUMABLE_STATE"` - RestoreSchemaOnCluster string `yaml:"restore_schema_on_cluster" envconfig:"RESTORE_SCHEMA_ON_CLUSTER"` - UploadByPart bool `yaml:"upload_by_part" envconfig:"UPLOAD_BY_PART"` - DownloadByPart bool `yaml:"download_by_part" envconfig:"DOWNLOAD_BY_PART"` - RestoreDatabaseMapping map[string]string `yaml:"restore_database_mapping" envconfig:"RESTORE_DATABASE_MAPPING"` - RetriesOnFailure int `yaml:"retries_on_failure" envconfig:"RETRIES_ON_FAILURE"` - RetriesPause string `yaml:"retries_pause" envconfig:"RETRIES_PAUSE"` - WatchInterval string `yaml:"watch_interval" envconfig:"WATCH_INTERVAL"` - FullInterval string `yaml:"full_interval" envconfig:"FULL_INTERVAL"` - WatchBackupNameTemplate string `yaml:"watch_backup_name_template" envconfig:"WATCH_BACKUP_NAME_TEMPLATE"` - ShardedOperationMode string `yaml:"sharded_operation_mode" envconfig:"SHARDED_OPERATION_MODE"` - CPUNicePriority int `yaml:"cpu_nice_priority" envconfig:"CPU_NICE_PRIORITY"` - IONicePriority string `yaml:"io_nice_priority" envconfig:"IO_NICE_PRIORITY"` - RBACBackupAlways bool `yaml:"rbac_backup_always" envconfig:"RBAC_BACKUP_ALWAYS"` - RBACConflictResolution string `yaml:"rbac_conflict_resolution" envconfig:"RBAC_CONFLICT_RESOLUTION"` - RetriesDuration time.Duration - WatchDuration time.Duration - FullDuration time.Duration + RemoteStorage string `yaml:"remote_storage" envconfig:"REMOTE_STORAGE"` + MaxFileSize int64 `yaml:"max_file_size" envconfig:"MAX_FILE_SIZE"` + BackupsToKeepLocal int `yaml:"backups_to_keep_local" envconfig:"BACKUPS_TO_KEEP_LOCAL"` + BackupsToKeepRemote int 
`yaml:"backups_to_keep_remote" envconfig:"BACKUPS_TO_KEEP_REMOTE"` + LogLevel string `yaml:"log_level" envconfig:"LOG_LEVEL"` + AllowEmptyBackups bool `yaml:"allow_empty_backups" envconfig:"ALLOW_EMPTY_BACKUPS"` + DownloadConcurrency uint8 `yaml:"download_concurrency" envconfig:"DOWNLOAD_CONCURRENCY"` + UploadConcurrency uint8 `yaml:"upload_concurrency" envconfig:"UPLOAD_CONCURRENCY"` + UploadMaxBytesPerSecond uint64 `yaml:"upload_max_bytes_per_second" envconfig:"UPLOAD_MAX_BYTES_PER_SECOND"` + DownloadMaxBytesPerSecond uint64 `yaml:"download_max_bytes_per_second" envconfig:"DOWNLOAD_MAX_BYTES_PER_SECOND"` + UseResumableState bool `yaml:"use_resumable_state" envconfig:"USE_RESUMABLE_STATE"` + RestoreSchemaOnCluster string `yaml:"restore_schema_on_cluster" envconfig:"RESTORE_SCHEMA_ON_CLUSTER"` + UploadByPart bool `yaml:"upload_by_part" envconfig:"UPLOAD_BY_PART"` + DownloadByPart bool `yaml:"download_by_part" envconfig:"DOWNLOAD_BY_PART"` + RestoreDatabaseMapping map[string]string `yaml:"restore_database_mapping" envconfig:"RESTORE_DATABASE_MAPPING"` + RetriesOnFailure int `yaml:"retries_on_failure" envconfig:"RETRIES_ON_FAILURE"` + RetriesPause string `yaml:"retries_pause" envconfig:"RETRIES_PAUSE"` + WatchInterval string `yaml:"watch_interval" envconfig:"WATCH_INTERVAL"` + FullInterval string `yaml:"full_interval" envconfig:"FULL_INTERVAL"` + WatchBackupNameTemplate string `yaml:"watch_backup_name_template" envconfig:"WATCH_BACKUP_NAME_TEMPLATE"` + ShardedOperationMode string `yaml:"sharded_operation_mode" envconfig:"SHARDED_OPERATION_MODE"` + CPUNicePriority int `yaml:"cpu_nice_priority" envconfig:"CPU_NICE_PRIORITY"` + IONicePriority string `yaml:"io_nice_priority" envconfig:"IO_NICE_PRIORITY"` + RBACBackupAlways bool `yaml:"rbac_backup_always" envconfig:"RBAC_BACKUP_ALWAYS"` + RBACConflictResolution string `yaml:"rbac_conflict_resolution" envconfig:"RBAC_CONFLICT_RESOLUTION"` + RetriesDuration time.Duration + WatchDuration time.Duration + FullDuration time.Duration } // GCSConfig - GCS settings section @@ -512,7 +513,6 @@ func DefaultConfig() *Config { BackupsToKeepLocal: 0, BackupsToKeepRemote: 0, LogLevel: "info", - DisableProgressBar: true, UploadConcurrency: uploadConcurrency, DownloadConcurrency: downloadConcurrency, RestoreSchemaOnCluster: "", diff --git a/pkg/progressbar/progressbar.go b/pkg/progressbar/progressbar.go deleted file mode 100644 index 696aabbe..00000000 --- a/pkg/progressbar/progressbar.go +++ /dev/null @@ -1,57 +0,0 @@ -package progressbar - -import ( - "fmt" - "io" - - progressbar "gopkg.in/cheggaaa/pb.v1" -) - -type Bar struct { - pb *progressbar.ProgressBar - show bool -} - -func StartNewByteBar(show bool, total int64) *Bar { - if show { - return &Bar{ - show: true, - pb: progressbar.StartNew(int(total)).SetUnits(progressbar.U_BYTES), - } - } - return &Bar{ - show: false, - } -} - -func (b *Bar) Finish() { - if b.show { - b.pb.Finish() - fmt.Print("\033[A") // move the cursor up - } -} - -func (b *Bar) Add64(add int64) { - if b.show { - b.pb.Add64(add) - } -} - -func (b *Bar) Set(current int) { - if b.show { - b.pb.Set(current) - } -} - -func (b *Bar) Increment() { - if b.show { - b.pb.Increment() - } -} - -func (b *Bar) NewProxyReader(r io.Reader) io.Reader { - if b.show { - return b.pb.NewProxyReader(r) - } - return r -} diff --git a/pkg/storage/general.go b/pkg/storage/general.go index 505c88ca..f0062875 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -4,11 +4,9 @@ import ( "archive/tar" "context" "encoding/json" - "errors" "fmt" 
"github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/v2/pkg/config" - "github.com/Altinity/clickhouse-backup/v2/pkg/progressbar" "github.com/eapache/go-resiliency/retrier" "io" "os" @@ -53,10 +51,9 @@ func (b *Backup) GetFullSize() uint64 { type BackupDestination struct { RemoteStorage - Log *apexLog.Entry - compressionFormat string - compressionLevel int - disableProgressBar bool + Log *apexLog.Entry + compressionFormat string + compressionLevel int } var metadataCacheLock sync.RWMutex @@ -297,17 +294,16 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, return result, nil } -func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remotePath string, localPath string) error { +func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remotePath string, localPath string, maxSpeed uint64) error { if err := os.MkdirAll(localPath, 0750); err != nil { return err } // get this first as GetFileReader blocks the ftp control channel - file, err := bd.StatFile(ctx, remotePath) + remoteFileInfo, err := bd.StatFile(ctx, remotePath) if err != nil { return err } - filesize := file.Size() - + startTime := time.Now() reader, err := bd.GetFileReaderWithLocalPath(ctx, remotePath, localPath) if err != nil { return err @@ -325,11 +321,8 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot } }() - bar := progressbar.StartNewByteBar(!bd.disableProgressBar, filesize) buf := buffer.New(BufferSize) - defer bar.Finish() bufReader := nio.NewReader(reader, buf) - proxyReader := bar.NewProxyReader(bufReader) compressionFormat := bd.compressionFormat if !checkArchiveExtension(path.Ext(remotePath), compressionFormat) { bd.Log.Warnf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) @@ -339,7 +332,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot if err != nil { return err } - if err := z.Extract(ctx, proxyReader, nil, func(ctx context.Context, file archiver.File) error { + if err := z.Extract(ctx, bufReader, nil, func(ctx context.Context, file archiver.File) error { f, err := file.Open() if err != nil { return fmt.Errorf("can't open %s", file.NameInArchive) @@ -378,15 +371,11 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot }); err != nil { return err } + bd.throttleSpeed(startTime, remoteFileInfo.Size(), maxSpeed) return nil } -func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLocalPath string, files []string, remotePath string) error { - if _, err := bd.StatFile(ctx, remotePath); err != nil { - if !errors.Is(err, ErrNotFound) && !os.IsNotExist(err) { - return err - } - } +func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLocalPath string, files []string, remotePath string, maxSpeed uint64) error { var totalBytes int64 for _, filename := range files { fInfo, err := os.Stat(path.Join(baseLocalPath, filename)) @@ -397,12 +386,10 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc totalBytes += fInfo.Size() } } - bar := progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes) - defer bar.Finish() pipeBuffer := buffer.New(BufferSize) body, w := nio.Pipe(pipeBuffer) g, ctx := errgroup.WithContext(ctx) - + startTime := time.Now() var writerErr, readerErr error g.Go(func() error { defer func() { @@ -430,7 +417,7 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, 
baseLoc if !info.Mode().IsRegular() { continue } - bar.Add64(info.Size()) + file := archiver.File{ FileInfo: info, NameInArchive: f, @@ -461,24 +448,14 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc readerErr = bd.PutFile(ctx, remotePath, body) return readerErr }) - return g.Wait() + if waitErr := g.Wait(); waitErr != nil { + return waitErr + } + bd.throttleSpeed(startTime, totalBytes, maxSpeed) + return nil } -func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remotePath string, localPath string, RetriesOnFailure int, RetriesDuration time.Duration) error { - var bar *progressbar.Bar - if !bd.disableProgressBar { - totalBytes := size - if size == 0 { - if err := bd.Walk(ctx, remotePath, true, func(ctx context.Context, f RemoteFile) error { - totalBytes += f.Size() - return nil - }); err != nil { - return err - } - } - bar = progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes) - defer bar.Finish() - } +func (bd *BackupDestination) DownloadPath(ctx context.Context, remotePath string, localPath string, RetriesOnFailure int, RetriesDuration time.Duration, maxSpeed uint64) error { log := bd.Log.WithFields(apexLog.Fields{ "path": remotePath, "operation": "download", @@ -489,6 +466,7 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot } retry := retrier.New(retrier.ConstantBackoff(RetriesOnFailure, RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { + startTime := time.Now() r, err := bd.GetFileReader(ctx, path.Join(remotePath, f.Name())) if err != nil { log.Error(err.Error()) @@ -505,7 +483,7 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot log.Error(err.Error()) return err } - if _, err := io.CopyBuffer(dst, r, nil); err != nil { + if _, err := io.Copy(dst, r); err != nil { log.Error(err.Error()) return err } @@ -517,38 +495,33 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot log.Error(err.Error()) return err } + + if dstFileInfo, err := os.Stat(dstFilePath); err == nil { + bd.throttleSpeed(startTime, dstFileInfo.Size(), maxSpeed) + } else { + return err + } + return nil }) if err != nil { return err } - if !bd.disableProgressBar { - bar.Add64(f.Size()) - } return nil }) } -func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLocalPath string, files []string, remotePath string, RetriesOnFailure int, RetriesDuration time.Duration) (int64, error) { - var bar *progressbar.Bar - totalBytes := size - if size == 0 { - for _, filename := range files { - fInfo, err := os.Stat(filepath.Clean(path.Join(baseLocalPath, filename))) - if err != nil { - return 0, err - } - if fInfo.Mode().IsRegular() { - totalBytes += fInfo.Size() - } - } - } - if !bd.disableProgressBar { - bar = progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes) - defer bar.Finish() - } - +func (bd *BackupDestination) UploadPath(ctx context.Context, baseLocalPath string, files []string, remotePath string, RetriesOnFailure int, RetriesDuration time.Duration, maxSpeed uint64) (int64, error) { + totalBytes := int64(0) for _, filename := range files { + startTime := time.Now() + fInfo, err := os.Stat(filepath.Clean(path.Join(baseLocalPath, filename))) + if err != nil { + return 0, err + } + if fInfo.Mode().IsRegular() { + totalBytes += fInfo.Size() + } f, err := os.Open(filepath.Clean(path.Join(baseLocalPath, filename))) if err != nil { return 0, err @@ -566,19 +539,28 @@ func (bd *BackupDestination) 
UploadPath(ctx context.Context, size int64, baseLoc closeFile() return 0, err } - fi, err := f.Stat() - if err != nil { - return 0, err - } - if !bd.disableProgressBar { - bar.Add64(fi.Size()) - } closeFile() + bd.throttleSpeed(startTime, fInfo.Size(), maxSpeed) } return totalBytes, nil } +func (bd *BackupDestination) throttleSpeed(startTime time.Time, size int64, maxSpeed uint64) { + if maxSpeed > 0 && size > 0 { + timeSince := time.Since(startTime).Nanoseconds() + currentSpeed := uint64(size*1000000000) / uint64(timeSince) + if currentSpeed > maxSpeed { + + // Calculate how long to sleep to reduce the average speed to maxSpeed + excessSpeed := currentSpeed - maxSpeed + excessData := uint64(size) - (maxSpeed * uint64(timeSince) / 1000000000) + sleepTime := time.Duration((excessData*1000000000)/excessSpeed) * time.Nanosecond + time.Sleep(sleepTime) + } + } +} + func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, calcMaxSize bool, backupName string) (*BackupDestination, error) { log := apexLog.WithField("logger", "NewBackupDestination") var err error @@ -630,7 +612,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "azure"), cfg.AzureBlob.CompressionFormat, cfg.AzureBlob.CompressionLevel, - cfg.General.DisableProgressBar, }, nil case "s3": partSize := cfg.S3.PartSize @@ -675,7 +656,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "s3"), cfg.S3.CompressionFormat, cfg.S3.CompressionLevel, - cfg.General.DisableProgressBar, }, nil case "gcs": googleCloudStorage := &GCS{Config: &cfg.GCS} @@ -701,7 +681,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "gcs"), cfg.GCS.CompressionFormat, cfg.GCS.CompressionLevel, - cfg.General.DisableProgressBar, }, nil case "cos": tencentStorage := &COS{Config: &cfg.COS} @@ -714,7 +693,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "cos"), cfg.COS.CompressionFormat, cfg.COS.CompressionLevel, - cfg.General.DisableProgressBar, }, nil case "ftp": ftpStorage := &FTP{ @@ -730,7 +708,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "FTP"), cfg.FTP.CompressionFormat, cfg.FTP.CompressionLevel, - cfg.General.DisableProgressBar, }, nil case "sftp": sftpStorage := &SFTP{ @@ -745,7 +722,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous log.WithField("logger", "SFTP"), cfg.SFTP.CompressionFormat, cfg.SFTP.CompressionLevel, - cfg.General.DisableProgressBar, }, nil default: return nil, fmt.Errorf("NewBackupDestination error: storage type '%s' is not supported", cfg.General.RemoteStorage) diff --git a/test/integration/config-ftp.yaml b/test/integration/config-ftp.yaml index 5fe8029c..73c92461 100644 --- a/test/integration/config-ftp.yaml +++ b/test/integration/config-ftp.yaml @@ -2,6 +2,8 @@ general: disable_progress_bar: true remote_storage: ftp upload_concurrency: 4 + upload_max_bytes_per_second: 10240 + download_max_bytes_per_second: 10240 download_concurrency: 4 restore_schema_on_cluster: "{cluster}" clickhouse: From d0a153ddd80609cfbb8aa9892cf3a81ca6438dee Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 14:13:36 +0400 Subject: [PATCH 69/80] remove progressbar related code --- go.mod | 9 +++------ go.sum | 17 +++++++---------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git 
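
The new throttling helper works with three quantities: the bytes just transferred, the time that transfer took, and the configured cap. Assuming the goal is that bytes divided by elapsed time plus pause stays at or below the cap, the pause can also be written in closed form; the following is a small self-contained sketch of that relationship, independent of the patch code above:

```go
package main

import (
	"fmt"
	"time"
)

// pauseFor returns how long to sleep after moving `size` bytes in `elapsed`
// so that the average rate over elapsed+pause does not exceed maxSpeed
// (bytes per second). maxSpeed == 0 means throttling is disabled.
func pauseFor(size int64, elapsed time.Duration, maxSpeed uint64) time.Duration {
	if maxSpeed == 0 || size <= 0 {
		return 0
	}
	// minimum total duration the transfer is allowed to take at the cap
	minTotal := time.Duration(float64(size) / float64(maxSpeed) * float64(time.Second))
	if minTotal <= elapsed {
		return 0 // already at or below the cap
	}
	return minTotal - elapsed
}

func main() {
	// e.g. 10 MiB moved in 2s against a 1 MiB/s cap -> pause ~8s
	fmt.Println(pauseFor(10<<20, 2*time.Second, 1<<20))
}
```
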
a/go.mod b/go.mod index 172cba24..09129d8e 100644 --- a/go.mod +++ b/go.mod @@ -41,11 +41,10 @@ require ( github.com/urfave/cli v1.22.14 github.com/xyproto/gionice v1.3.0 github.com/yargevad/filepathx v1.0.0 - golang.org/x/crypto v0.18.0 + golang.org/x/crypto v0.21.0 golang.org/x/mod v0.14.0 golang.org/x/sync v0.6.0 google.golang.org/api v0.157.0 - gopkg.in/cheggaaa/pb.v1 v1.0.28 gopkg.in/yaml.v3 v3.0.1 ) @@ -101,7 +100,6 @@ require ( github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect github.com/mattn/go-ieproxy v0.0.11 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mozillazg/go-httpheader v0.4.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect @@ -111,7 +109,6 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rivo/uniseg v0.4.4 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect @@ -124,9 +121,9 @@ require ( go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 84061755..57af44eb 100644 --- a/go.sum +++ b/go.sum @@ -149,7 +149,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -290,16 +289,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
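
With the progress-bar proxy readers dropped from go.mod above, the compressed-stream path in pkg/storage relies only on the bounded pipe from github.com/djherbis/buffer and github.com/djherbis/nio/v3, which stay in the dependency list. A minimal sketch of that pattern, with the buffer size and payload chosen purely for illustration:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/djherbis/buffer"
	"github.com/djherbis/nio/v3"
)

func main() {
	// bounded buffer between producer and consumer, mirroring how
	// UploadCompressedStream feeds PutFile through nio.Pipe
	buf := buffer.New(32 * 1024)
	r, w := nio.Pipe(buf)

	go func() {
		defer w.Close()
		// producer side: in the real code this is the tar/compression writer
		if _, err := io.Copy(w, strings.NewReader("some backup payload")); err != nil {
			fmt.Println("write error:", err)
		}
	}()

	// consumer side: in the real code this reader is handed to remote storage
	n, err := io.Copy(io.Discard, r)
	fmt.Println(n, err)
}
```
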
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -345,9 +340,6 @@ github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= github.com/ricochet2200/go-disk-usage/du v0.0.0-20210707232629-ac9918953285 h1:d54EL9l+XteliUfUCGsEwwuk65dmmxX85VXF+9T6+50= github.com/ricochet2200/go-disk-usage/du v0.0.0-20210707232629-ac9918953285/go.mod h1:fxIDly1xtudczrZeOOlfaUvd2OPb2qZAPuWdU2BsBTk= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= @@ -441,6 +433,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -497,6 +491,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -540,12 +536,15 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -662,8 +661,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= From 4473f3e62526509c82979e66cef5283a080692db Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 16:03:15 +0400 Subject: [PATCH 70/80] - updated all third-party SDK to latest versions --- ChangeLog.md | 1 + go.mod | 112 +++++++++++++++++++++--------------------- go.sum | 136 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 193 insertions(+), 56 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index da52246a..e935300f 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -14,6 +14,7 @@ IMPROVEMENTS - added `upload_max_bytes_per_seconds` and `download_max_bytes_per_seconds` config options to allow throttling without CAP_SYS_NICE, fix [817](https://github.com/Altinity/clickhouse-backup/issues/817) - removed `disable_progress_bar` config option and related progress bar code - switched to golang 1.22 +- updated all third-party SDK to latest versions - added `clickhouse/clickhouse-server:24.3` to CI/CD BUG FIXES diff --git a/go.mod b/go.mod index 09129d8e..7b0c445c 100644 --- a/go.mod +++ b/go.mod @@ -1,24 +1,24 @@ module github.com/Altinity/clickhouse-backup/v2 require ( - cloud.google.com/go/storage v1.37.0 + cloud.google.com/go/storage v1.40.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 - github.com/ClickHouse/clickhouse-go/v2 v2.17.1 + github.com/ClickHouse/clickhouse-go/v2 v2.23.0 github.com/antchfx/xmlquery v1.3.18 github.com/apex/log v1.9.0 - github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.26.6 - github.com/aws/aws-sdk-go-v2/credentials v1.16.16 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 - 
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 - github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 - github.com/aws/smithy-go v1.19.0 + github.com/aws/aws-sdk-go-v2 v1.26.1 + github.com/aws/aws-sdk-go-v2/config v1.27.10 + github.com/aws/aws-sdk-go-v2/credentials v1.17.10 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.14 + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 + github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 + github.com/aws/smithy-go v1.20.2 github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 - github.com/eapache/go-resiliency v1.5.0 + github.com/eapache/go-resiliency v1.6.0 github.com/go-logfmt/logfmt v0.6.0 github.com/go-zookeeper/zk v1.0.3 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -27,58 +27,58 @@ require ( github.com/jlaffaye/ftp v0.2.0 github.com/jolestar/go-commons-pool/v2 v2.1.2 github.com/kelseyhightower/envconfig v1.4.0 - github.com/klauspost/compress v1.17.4 + github.com/klauspost/compress v1.17.7 github.com/mattn/go-shellwords v1.0.12 github.com/mholt/archiver/v4 v4.0.0-alpha.8 github.com/otiai10/copy v1.14.0 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 - github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_golang v1.19.0 github.com/puzpuzpuz/xsync v1.5.2 github.com/ricochet2200/go-disk-usage/du v0.0.0-20210707232629-ac9918953285 - github.com/stretchr/testify v1.8.4 - github.com/tencentyun/cos-go-sdk-v5 v0.7.45 + github.com/stretchr/testify v1.9.0 + github.com/tencentyun/cos-go-sdk-v5 v0.7.47 github.com/urfave/cli v1.22.14 github.com/xyproto/gionice v1.3.0 github.com/yargevad/filepathx v1.0.0 - golang.org/x/crypto v0.21.0 + golang.org/x/crypto v0.22.0 golang.org/x/mod v0.14.0 - golang.org/x/sync v0.6.0 - google.golang.org/api v0.157.0 + golang.org/x/sync v0.7.0 + google.golang.org/api v0.172.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.2 // indirect + cloud.google.com/go/compute v1.25.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/iam v1.1.7 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/ClickHouse/ch-go v0.61.1 // indirect + github.com/ClickHouse/ch-go v0.61.5 // indirect github.com/andybalholm/brotli v1.1.0 // indirect - github.com/antchfx/xpath v1.2.5 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect + github.com/antchfx/xpath v1.3.0 // indirect + 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bodgit/plumbing v1.3.0 // indirect - github.com/bodgit/sevenzip v1.4.5 // indirect + github.com/bodgit/sevenzip v1.5.0 // indirect github.com/bodgit/windows v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clbanning/mxj v1.8.4 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -88,12 +88,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -103,34 +103,34 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mozillazg/go-httpheader v0.4.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect - github.com/paulmach/orb v0.11.0 // indirect + github.com/paulmach/orb v0.11.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.46.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/therootcompany/xz v1.0.1 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - 
go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/grpc v1.61.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/go.sum b/go.sum index 57af44eb..29040bd2 100644 --- a/go.sum +++ b/go.sum @@ -9,21 +9,33 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= +cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod 
h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= @@ -51,8 +63,13 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/ch-go v0.61.1 h1:j5rx3qnvcnYjhnP1IdXE/vdIRQiqgwAzyqOaasA6QCw= github.com/ClickHouse/ch-go v0.61.1/go.mod h1:myxt/JZgy2BYHFGQqzmaIpbfr5CMbs3YHVULaWQj5YU= +github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4= +github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go/v2 v2.17.1 h1:ZCmAYWpu75IyEi7+Yrs/uaAjiCGY5wfW5kXo64exkX4= github.com/ClickHouse/clickhouse-go/v2 v2.17.1/go.mod h1:rkGTvFDTLqLIm0ma+13xmcCfr/08Gvs7KmFt1tgiWHQ= +github.com/ClickHouse/clickhouse-go/v2 v2.23.0 h1:srmRrkS0BR8gEut87u8jpcZ7geOob6nGj9ifrb+aKmg= +github.com/ClickHouse/clickhouse-go/v2 v2.23.0/go.mod h1:tBhdF3f3RdP7sS59+oBAtTyhWpy0024ZxDMhgxra0QE= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= @@ -61,6 +78,8 @@ github.com/antchfx/xmlquery v1.3.18/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7T github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antchfx/xpath v1.2.5 h1:hqZ+wtQ+KIOV/S3bGZcIhpgYC26um2bZYP2KVGcR7VY= github.com/antchfx/xpath v1.2.5/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xpath v1.3.0 h1:nTMlzGAK3IJ0bPpME2urTuFL76o4A96iYvoKFHRXJgc= +github.com/antchfx/xpath v1.3.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= @@ -69,42 +88,80 @@ github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3st github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod 
h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= +github.com/aws/aws-sdk-go-v2/config v1.27.10 h1:PS+65jThT0T/snC5WjyfHHyUgG+eBoupSDV+f838cro= +github.com/aws/aws-sdk-go-v2/config v1.27.10/go.mod h1:BePM7Vo4OBpHreKRUMuDXX+/+JWP38FLkzl5m27/Jjs= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10 h1:qDZ3EA2lv1KangvQB6y258OssCHD0xvaGiEDkG4X/10= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10/go.mod h1:6t3sucOaYDwDssHQa0ojH1RpmVmF5/jArkye1b2FKMI= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.14 h1:Nhcq+ODoD9FRQYI3lATy6iADS5maER3ZXSfE8v3FMh8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.14/go.mod h1:VlBbwTpgCj3rKWMVkEAYiAR3FKs7Mi3jALTMGfbfuns= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= 
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 h1:WzFol5Cd+yDxPAdnzTA5LmpHYSWinhmSj4rQChV0ee8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -112,11 +169,15 @@ github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= github.com/bodgit/sevenzip v1.4.5 h1:HFJQ+nbjppfyf2xbQEJBbmVo+o2kTg1FXV4i7YOx87s= github.com/bodgit/sevenzip v1.4.5/go.mod h1:LAcAg/UQzyjzCQSGBPZFYzoiHMfT6Gk+3tMSjUk3foY= +github.com/bodgit/sevenzip v1.5.0 h1:QESwnPUnhqftOgbi6wIiWm1WEkrT4puHukt5a2psEcw= +github.com/bodgit/sevenzip v1.5.0/go.mod h1:+E74G6pfBX8IMaVybsKMgGTTTBcbHU8ssPTJ9mLUr38= github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -126,9 +187,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= 
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -143,12 +207,15 @@ github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj6 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/eapache/go-resiliency v1.5.0 h1:dRsaR00whmQD+SgVKlq/vCRFNgtEb5yppyeVos3Yce0= github.com/eapache/go-resiliency v1.5.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -201,6 +268,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -242,6 +311,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -274,6 
+345,8 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -317,6 +390,8 @@ github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/paulmach/orb v0.11.0 h1:JfVXJUBeH9ifc/OrhBY0lL16QsmPgpCHMlqSSYhcgAA= github.com/paulmach/orb v0.11.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -329,13 +404,21 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= 
github.com/ricochet2200/go-disk-usage/du v0.0.0-20210707232629-ac9918953285 h1:d54EL9l+XteliUfUCGsEwwuk65dmmxX85VXF+9T6+50= @@ -367,10 +450,14 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= github.com/tencentyun/cos-go-sdk-v5 v0.7.45 h1:5/ZGOv846tP6+2X7w//8QjLgH2KcUK+HciFbfjWquFU= github.com/tencentyun/cos-go-sdk-v5 v0.7.45/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE= +github.com/tencentyun/cos-go-sdk-v5 v0.7.47 h1:uoS4Sob16qEYoapkqJq1D1Vnsy9ira9BfNUMtoFYTI4= +github.com/tencentyun/cos-go-sdk-v5 v0.7.47/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -384,6 +471,8 @@ github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKw github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -406,16 +495,28 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= 
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -435,6 +536,8 @@ golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -493,6 +596,10 @@ golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -500,6 +607,10 @@ golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -511,6 +622,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -538,6 +651,8 @@ golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -545,6 +660,7 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -604,6 +720,10 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.157.0 
h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= +google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= +google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -627,10 +747,22 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -642,6 +774,10 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 667c28aadcc8ffd76d47fc5ad0bb4a2b7ea0110e Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 16:46:09 +0400 Subject: [PATCH 71/80] fix testflows cli snapshot --- .../clickhouse_backup/tests/snapshots/cli.py.cli.snapshot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index 8ddf1f34..dff1107b 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' rbac_backup_always: true\', \' rbac_conflict_resolution: recreate\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' 
object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 0\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" +default_config = r"""'[\'general:\', \' remote_storage: none\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' rbac_backup_always: true\', \' rbac_conflict_resolution: recreate\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' 
skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' embedded_backup_threads: 0\', \' embedded_restore_threads: 0\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 0\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: 
""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.
] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --environment-override value, --env value override any environment variable via CLI parameter\n --help, -h show help\n --version, -v print the version'""" From ab3dd36d7e94f605e756387f0ee9eda7c0302c4d Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 5 Apr 2024 20:11:00 +0400 Subject: [PATCH 72/80] fixed connection to clickhouse-server behavior when long clickhouse-server startup time and `docker-entrypoint.d` processing, will infinite reconnect each 5 seconds, until success, fix https://github.com/Altinity/clickhouse-backup/issues/857 --- ChangeLog.md | 1 + pkg/clickhouse/clickhouse.go | 35 +++++++++++++++++++++-------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index e935300f..ca12babb 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -26,6 +26,7 @@ BUG FIXES - apply `CLICKHOUSE_SKIP_TABLES_ENGINES` during `create` command - fixed behavior for upload / download when .inner. 
table missing for MATERIALIZED VIEW by table pattern, fix [765](https://github.com/Altinity/clickhouse-backup/issues/765)
- fixed `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` - shall skip upload object disk content, fix [799](https://github.com/Altinity/clickhouse-backup/issues/799)
+- fixed connection to clickhouse-server behavior when long clickhouse-server startup time and `docker-entrypoint.d` processing, will infinite reconnect each 5 seconds, until success, fix [857](https://github.com/Altinity/clickhouse-backup/issues/857)

 # v2.4.35
 IMPROVEMENTS
diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go
index f1274442..bdfb97b3 100644
--- a/pkg/clickhouse/clickhouse.go
+++ b/pkg/clickhouse/clickhouse.go
@@ -104,25 +104,32 @@ func (ch *ClickHouse) Connect() error {
 		opt.Settings["log_queries"] = 0
 	}
-	if ch.conn, err = clickhouse.Open(opt); err != nil {
-		ch.Log.Errorf("clickhouse connection: %s, sql.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err)
-		return err
-	}
-
 	logFunc := ch.Log.Infof
 	if !ch.Config.LogSQLQueries {
 		logFunc = ch.Log.Debugf
 	}
-	logFunc("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port))
-	err = ch.conn.Ping(context.Background())
-	if err != nil {
-		ch.Log.Errorf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err)
-		return err
-	} else {
-		ch.IsOpen = true
+	// infinite reconnect until success, fix https://github.com/Altinity/clickhouse-backup/issues/857
+	for {
+		for {
+			ch.conn, err = clickhouse.Open(opt)
+			if err == nil {
+				break
+			}
+			ch.Log.Warnf("clickhouse connection: %s, sql.Open return error: %v, will wait 5 seconds to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err)
+			time.Sleep(5 * time.Second)
+		}
+		logFunc("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port))
+		err = ch.conn.Ping(context.Background())
+		if err == nil {
+			logFunc("clickhouse connection success: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port))
+			ch.IsOpen = true
+			break
+		}
+		ch.Log.Warnf("clickhouse connection ping: %s return error: %v, will wait 5 seconds to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err)
+		time.Sleep(5 * time.Second)
 	}
-	logFunc("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port))
-	return err
+
+	return nil
 }

 // GetDisks - return data from system.disks table

From 4feacc34a3e1e6e961544a7089661b4d2f484461 Mon Sep 17 00:00:00 2001
From: Slach
Date: Sat, 6 Apr 2024 12:14:23 +0400
Subject: [PATCH 73/80] fixed `USE_EMBEDDED_BACKUP_RESTORE=true` behavior to allow use of a backup disk with type `local`, fix https://github.com/Altinity/clickhouse-backup/issues/882, and completely removed support for legacy backups created with versions prior to v1.0

---
 ChangeLog.md | 4 +-
 pkg/backup/backuper.go | 3 -
 pkg/backup/create.go | 1 -
 pkg/backup/delete.go | 2 +-
 pkg/backup/download.go | 39 ------
 pkg/backup/download_test.go | 1 -
 pkg/backup/list.go | 20 +--
 pkg/backup/restore.go | 56 ++++----
 pkg/clickhouse/clickhouse.go | 27 +---
 pkg/clickhouse/legacy.go | 94 -------------
 pkg/clickhouse/structs.go | 2 +-
 pkg/filesystemhelper/filesystemhelper.go | 4 -
 pkg/metadata/metadata.go | 8 --
 pkg/server/server.go | 6 -
 pkg/storage/general.go | 37 +----
 pkg/storage/utils_test.go | 126 +++++++++---------
 test/integration/config-s3-embedded-local.yml | 39 ++++++
 test/integration/docker-compose.yml | 1 +
 test/integration/docker-compose_advanced.yml | 1 +
 test/integration/dynamic_settings.sh | 45 +++++--
 test/integration/integration_test.go | 22 +--
 21 files changed, 196 insertions(+), 342 deletions(-)
 delete mode 100644 pkg/clickhouse/legacy.go
 create mode 100644 test/integration/config-s3-embedded-local.yml

diff --git a/ChangeLog.md b/ChangeLog.md
index ca12babb..6c93ffac 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,5 +1,7 @@
 # v2.5.0 (not released yet)
 IMPROVEMENTS
+- completely removed support for legacy backups created with versions prior to v1.0
+- removed `disable_progress_bar` config option and related progress bar code
 - added `--delete-source` parameter for `upload` and `create_remote` commands to explicitly delete local backup during upload, fix [777](https://github.com/Altinity/clickhouse-backup/issues/777)
 - added support for `--env ENV_NAME=value` cli parameter for allow dynamically override any config parameter, fix [821](https://github.com/Altinity/clickhouse-backup/issues/821)
 - added support for `use_embedded_backup_restore: true` with empty `embedded_backup_disk` value, tested on S3/GCS over S3/AzureBlobStorage, fix [695](https://github.com/Altinity/clickhouse-backup/issues/695)
@@ -12,7 +14,6 @@ IMPROVEMENTS
 - added `rbac_always_backup: true` option to default config, will create backup for RBAC objects automatically, restore still require `--rbac` to avoid destructive actions, fix [793](https://github.com/Altinity/clickhouse-backup/issues/793)
 - added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix [851](https://github.com/Altinity/clickhouse-backup/issues/851)
 - added `upload_max_bytes_per_seconds` and `download_max_bytes_per_seconds` config options to allow throttling without CAP_SYS_NICE, fix [817](https://github.com/Altinity/clickhouse-backup/issues/817)
-- removed `disable_progress_bar` config option and related progress bar code
 - switched to golang 1.22
 - updated all third-party SDK to latest versions
 - added `clickhouse/clickhouse-server:24.3` to CI/CD
@@ -27,6 +28,7 @@ BUG FIXES
 - fixed behavior for upload / download when .inner.
table missing for MATERIALIZED VIEW by table pattern, fix [765](https://github.com/Altinity/clickhouse-backup/issues/765) - fixed `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` - shall skip upload object disk content, fix [799](https://github.com/Altinity/clickhouse-backup/issues/799) - fixed connection to clickhouse-server behavior when long clickhouse-server startup time and `docker-entrypoint.d` processing, will infinite reconnect each 5 seconds, until success, fix [857](https://github.com/Altinity/clickhouse-backup/issues/857) +- fixed `USE_EMBEDDED_BACKUP_RESTORE=true` behavior to allow use backup disk with type `local`, fix [882](https://github.com/Altinity/clickhouse-backup/issues/882) # v2.4.35 IMPROVEMENTS diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 06c9876e..926cecf0 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -339,9 +339,6 @@ func (b *Backuper) getTablesDiffFromRemote(ctx context.Context, diffFromRemote s var diffRemoteMetadata *metadata.BackupMetadata for _, backup := range backupList { if backup.BackupName == diffFromRemote { - if backup.Legacy { - return nil, fmt.Errorf("%s have legacy format and can't be used as diff-from-remote source", diffFromRemote) - } diffRemoteMetadata = &backup.BackupMetadata break } diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 31e5c6d6..ac711c65 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -45,7 +45,6 @@ var ( type LocalBackup struct { metadata.BackupMetadata - Legacy bool Broken string } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 7493285d..093d60c6 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -214,7 +214,7 @@ func (b *Backuper) hasObjectDisksLocal(backupList []LocalBackup, backupName stri func (b *Backuper) cleanLocalEmbedded(ctx context.Context, backup LocalBackup, disks []clickhouse.Disk) error { for _, disk := range disks { - if disk.Name == b.cfg.ClickHouse.EmbeddedBackupDisk { + if disk.Name == b.cfg.ClickHouse.EmbeddedBackupDisk && disk.Type != "local" { if err := object_disk.InitCredentialsAndConnections(ctx, b.ch, b.cfg, disk.Name); err != nil { return err } diff --git a/pkg/backup/download.go b/pkg/backup/download.go index c60d3f89..27f4d2c7 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -39,34 +39,6 @@ var ( ErrBackupIsAlreadyExists = errors.New("backup is already exists") ) -func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error { - log := b.log.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "download_legacy", - }) - bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, true, "") - if err != nil { - return err - } - if err := bd.Connect(ctx); err != nil { - return err - } - defer func() { - if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) - } - }() - retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) - err = retry.RunCtx(ctx, func(ctx context.Context) error { - return bd.DownloadCompressedStream(ctx, backupName, path.Join(b.DefaultDataPath, "backup", backupName), b.cfg.General.DownloadMaxBytesPerSecond) - }) - if err != nil { - return err - } - log.Info("done") - return nil -} - func (b *Backuper) Download(backupName string, tablePattern string, partitions []string, schemaOnly, resume bool, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { @@ -142,17 +114,6 @@ func 
(b *Backuper) Download(backupName string, tablePattern string, partitions [ if !found { return fmt.Errorf("'%s' is not found on remote storage", backupName) } - //look https://github.com/Altinity/clickhouse-backup/discussions/266 need download legacy before check for empty backup - if remoteBackup.Legacy { - if tablePattern != "" { - return fmt.Errorf("'%s' is old format backup and doesn't supports download of specific tables", backupName) - } - if schemaOnly { - return fmt.Errorf("'%s' is old format backup and doesn't supports download of schema only", backupName) - } - log.Warnf("'%s' is old-format backup", backupName) - return b.legacyDownload(ctx, backupName) - } if len(remoteBackup.Tables) == 0 && !b.cfg.General.AllowEmptyBackups { return fmt.Errorf("'%s' is empty backup", backupName) } diff --git a/pkg/backup/download_test.go b/pkg/backup/download_test.go index 776e87d6..c36b8f9c 100644 --- a/pkg/backup/download_test.go +++ b/pkg/backup/download_test.go @@ -90,7 +90,6 @@ var remoteBackup = storage.Backup{ DataFormat: "tar", RequiredBackup: "", }, - Legacy: false, UploadDate: time.Now(), } diff --git a/pkg/backup/list.go b/pkg/backup/list.go index 5bf13d78..f1042ff6 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -61,9 +61,6 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) } description := backup.DataFormat uploadDate := backup.UploadDate.Format("02/01/2006 15:04:05") - if backup.Legacy { - description += ", old-format" - } if backup.Tags != "" { description += ", " + backup.Tags } @@ -119,9 +116,6 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku description += backup.Tags } creationDate := backup.CreationDate.Format("02/01/2006 15:04:05") - if backup.Legacy { - size = "???" 
- } required := "" if backup.RequiredBackup != "" { required = "+" + backup.RequiredBackup @@ -238,15 +232,10 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } backupMetafilePath := path.Join(backupPath, name, "metadata.json") backupMetadataBody, err := os.ReadFile(backupMetafilePath) - if os.IsNotExist(err) { - // Legacy backup - result = append(result, LocalBackup{ - BackupMetadata: metadata.BackupMetadata{ - BackupName: name, - CreationDate: info.ModTime(), - }, - Legacy: true, - }) + if err != nil { + if !os.IsNotExist(err) { + return result, disks, err + } continue } var backupMetadata metadata.BackupMetadata @@ -255,7 +244,6 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } result = append(result, LocalBackup{ BackupMetadata: backupMetadata, - Legacy: false, }) } if closeErr := d.Close(); closeErr != nil { diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 7d0e8f33..020477ac 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -102,12 +102,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par break } } - if os.IsNotExist(err) { // Legacy backups don't have metadata.json, but we need handle not exists local backup - backupPath := path.Join(b.DefaultDataPath, "backup", backupName) - if fInfo, fErr := os.Stat(backupPath); fErr != nil || !fInfo.IsDir() { - return fmt.Errorf("'%s' stat return %v, %v", backupPath, fInfo, fErr) - } - } else if err != nil { + if err != nil { return err } backupMetadata := metadata.BackupMetadata{} @@ -200,7 +195,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } if schemaOnly || dropExists || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err = b.RestoreSchema(ctx, backupName, tablesForRestore, ignoreDependencies, version); err != nil { + if err = b.RestoreSchema(ctx, backupName, backupMetadata, disks, tablesForRestore, ignoreDependencies, version); err != nil { return err } } @@ -812,7 +807,7 @@ func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore Lis } // RestoreSchema - restore schemas matched by tablePattern from backupName -func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesForRestore ListOfTables, ignoreDependencies bool, version int) error { +func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, disks []clickhouse.Disk, tablesForRestore ListOfTables, ignoreDependencies bool, version int) error { log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, "operation": "restore_schema", @@ -823,7 +818,7 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, tablesF } var restoreErr error if b.isEmbedded { - restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore, version) + restoreErr = b.restoreSchemaEmbedded(ctx, backupName, backupMetadata, disks, tablesForRestore, version) } else { restoreErr = b.restoreSchemaRegular(tablesForRestore, version, log) } @@ -838,7 +833,7 @@ var UUIDWithMergeTreeRE = regexp.MustCompile(`^(.+)(UUID)(\s+)'([^']+)'(.+)({uui var emptyReplicatedMergeTreeRE = regexp.MustCompile(`(?m)Replicated(MergeTree|ReplacingMergeTree|SummingMergeTree|AggregatingMergeTree|CollapsingMergeTree|VersionedCollapsingMergeTree|GraphiteMergeTree)\s*\(([^']*)\)(.*)`) -func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, tablesForRestore ListOfTables, version int) error { +func (b *Backuper) 
restoreSchemaEmbedded(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, disks []clickhouse.Disk, tablesForRestore ListOfTables, version int) error { var err error if tablesForRestore == nil || len(tablesForRestore) == 0 { if !b.cfg.General.AllowEmptyBackups { @@ -848,7 +843,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, return nil } if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - err = b.fixEmbeddedMetadataLocal(ctx, backupName, version) + err = b.fixEmbeddedMetadataLocal(ctx, backupName, backupMetadata, disks, version) } else { err = b.fixEmbeddedMetadataRemote(ctx, backupName, version) } @@ -898,7 +893,7 @@ func (b *Backuper) fixEmbeddedMetadataRemote(ctx context.Context, backupName str return nil } -func (b *Backuper) fixEmbeddedMetadataLocal(ctx context.Context, backupName string, chVersion int) error { +func (b *Backuper) fixEmbeddedMetadataLocal(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, disks []clickhouse.Disk, chVersion int) error { metadataPath := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata") if walkErr := filepath.Walk(metadataPath, func(filePath string, info fs.FileInfo, err error) error { if err != nil { @@ -907,6 +902,25 @@ func (b *Backuper) fixEmbeddedMetadataLocal(ctx context.Context, backupName stri if !strings.HasSuffix(filePath, ".sql") { return nil } + if backupMetadata.DiskTypes[b.cfg.ClickHouse.EmbeddedBackupDisk] == "local" { + sqlBytes, err := os.ReadFile(filePath) + if err != nil { + return err + } + sqlQuery, sqlMetadataChanged, fixSqlErr := b.fixEmbeddedMetadataSQLQuery(ctx, sqlBytes, filePath, chVersion) + if fixSqlErr != nil { + return fixSqlErr + } + if sqlMetadataChanged { + if err = os.WriteFile(filePath, []byte(sqlQuery), 0644); err != nil { + return err + } + if err = filesystemhelper.Chown(filePath, b.ch, disks, false); err != nil { + return err + } + } + return nil + } sqlMetadata, err := object_disk.ReadMetadataFromFile(filePath) if err != nil { return err @@ -1104,18 +1118,12 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci // RestoreData - restore data for tables matched by tablePattern from backupName func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk) error { + var err error startRestoreData := time.Now() log := apexLog.WithFields(apexLog.Fields{ "backup": backupName, "operation": "restore_data", }) - if b.ch.IsClickhouseShadow(path.Join(b.DefaultDataPath, "backup", backupName, "shadow")) { - return fmt.Errorf("backups created in v0.0.1 is not supported now") - } - backup, _, err := b.getLocalBackup(ctx, backupName, disks) - if err != nil { - return fmt.Errorf("can't restore: %v", err) - } diskMap := make(map[string]string, len(disks)) diskTypes := make(map[string]string, len(disks)) @@ -1123,18 +1131,14 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMet diskMap[disk.Name] = disk.Path diskTypes[disk.Name] = disk.Type } - for diskName := range backup.DiskTypes { + for diskName := range backupMetadata.DiskTypes { if _, exists := diskTypes[diskName]; !exists { - diskTypes[diskName] = backup.DiskTypes[diskName] + diskTypes[diskName] = backupMetadata.DiskTypes[diskName] } } var tablesForRestore ListOfTables var partitionsNameList map[metadata.TableTitle][]string - if backup.Legacy { - tablesForRestore, err = 
b.ch.GetBackupTablesLegacy(backupName, disks) - } else { - tablesForRestore, partitionsNameList, err = b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, false, partitions) - } + tablesForRestore, partitionsNameList, err = b.getTableListByPatternLocal(ctx, metadataPath, tablePattern, false, partitions) if err != nil { // fix https://github.com/Altinity/clickhouse-backup/issues/832 if b.cfg.General.AllowEmptyBackups && os.IsNotExist(err) { diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index bdfb97b3..1e1e0072 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -943,7 +943,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe return errors.New(fmt.Sprintf("schema query ```%s``` doesn't contains table name `%s`", query, table.Name)) } - // fix restore schema for legacy backup + // fix schema for restore // see https://github.com/Altinity/clickhouse-backup/issues/268 // https://github.com/Altinity/clickhouse-backup/issues/297 // https://github.com/Altinity/clickhouse-backup/issues/331 @@ -984,31 +984,6 @@ func (ch *ClickHouse) GetConn() driver.Conn { return ch.conn } -func (ch *ClickHouse) IsClickhouseShadow(path string) bool { - d, err := os.Open(path) - if err != nil { - return false - } - defer func() { - if err := d.Close(); err != nil { - ch.Log.Warnf("can't close directory %v", err) - } - }() - names, err := d.Readdirnames(-1) - if err != nil { - return false - } - for _, name := range names { - if name == "increment.txt" { - continue - } - if _, err := strconv.Atoi(name); err != nil { - return false - } - } - return true -} - func (ch *ClickHouse) StructSelect(dest interface{}, query string, args ...interface{}) error { return ch.SelectContext(context.Background(), dest, query, args...) 
} diff --git a/pkg/clickhouse/legacy.go b/pkg/clickhouse/legacy.go deleted file mode 100644 index 03bd770e..00000000 --- a/pkg/clickhouse/legacy.go +++ /dev/null @@ -1,94 +0,0 @@ -package clickhouse - -import ( - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" -) - -// GetBackupTablesLegacy - return list of backups of tables that can be restored -func (ch *ClickHouse) GetBackupTablesLegacy(backupName string, disks []Disk) ([]metadata.TableMetadata, error) { - dataPath, err := ch.GetDefaultPath(disks) - if err != nil { - return nil, err - } - backupShadowPath := filepath.Join(dataPath, "backup", backupName, "shadow") - dbNum := 0 - tableNum := 1 - partNum := 2 - totalNum := 3 - if ch.IsClickhouseShadow(backupShadowPath) { - dbNum = 2 - tableNum = 3 - partNum = 4 - totalNum = 5 - } - fi, err := os.Stat(backupShadowPath) - if err != nil { - return nil, fmt.Errorf("can't get tables, %v", err) - } - if !fi.IsDir() { - return nil, fmt.Errorf("can't get tables, %s is not a dir", backupShadowPath) - } - - var allpartsBackup map[string][]metadata.Part - // TODO: we don't need anymore fill Partitions from file, we can get it from `system.detached_parts` table - tables := make(map[string]metadata.TableMetadata) - err = filepath.Walk(backupShadowPath, func(filePath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - filePath = filepath.ToSlash(filePath) // fix fucking Windows slashes - relativePath := strings.Trim(strings.TrimPrefix(filePath, backupShadowPath), "/") - parts := strings.Split(relativePath, "/") - if len(parts) != totalNum { - return nil - } - - tDB, _ := url.PathUnescape(parts[dbNum]) - tName, _ := url.PathUnescape(parts[tableNum]) - fullTableName := fmt.Sprintf("%s.%s", tDB, tName) - - allPartsHashes := allpartsBackup[fullTableName] - var hashOfAllFiles, hashOfUncompressedFiles, uncompressHashOfCompressedFiles string - for _, partHash := range allPartsHashes { - if partHash.Name == parts[partNum] { - hashOfAllFiles = partHash.HashOfAllFiles - hashOfUncompressedFiles = partHash.HashOfUncompressedFiles - uncompressHashOfCompressedFiles = partHash.UncompressedHashOfCompressedFiles - } - } - - partition := metadata.Part{ - Name: parts[partNum], - // Path: filePath, - HashOfAllFiles: hashOfAllFiles, - HashOfUncompressedFiles: hashOfUncompressedFiles, - UncompressedHashOfCompressedFiles: uncompressHashOfCompressedFiles, - } - - if t, ok := tables[fullTableName]; ok { - t.Parts["default"] = append(t.Parts["default"], partition) - tables[fullTableName] = t - return nil - } - tables[fullTableName] = metadata.TableMetadata{ - Database: tDB, - Table: tName, - Parts: map[string][]metadata.Part{"default": {partition}}, - } - return nil - } - return nil - }) - result := make([]metadata.TableMetadata, 0) - for i := range tables { - result = append(result, tables[i]) - } - return result, err -} diff --git a/pkg/clickhouse/structs.go b/pkg/clickhouse/structs.go index c634b174..90e6bf48 100644 --- a/pkg/clickhouse/structs.go +++ b/pkg/clickhouse/structs.go @@ -19,7 +19,7 @@ type Table struct { Name string `ch:"name"` Engine string `ch:"engine"` // fields depends on `clickhouse-server` version - DataPath string `ch:"data_path"` // For legacy support + DataPath string `ch:"data_path"` DataPaths []string `ch:"data_paths"` UUID string `ch:"uuid"` CreateTableQuery string `ch:"create_table_query"` diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go 
index 40b32ddf..76bd367c 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -170,10 +170,6 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM return fmt.Errorf("'%s' should be directory or absent", dstPartPath) } srcPartPath := path.Join(backupDiskPath, "backup", backupName, "shadow", dbAndTableDir, backupDiskName, part.Name) - // Legacy backup support - if _, err := os.Stat(srcPartPath); os.IsNotExist(err) { - srcPartPath = path.Join(backupDiskPath, "backup", backupName, "shadow", dbAndTableDir, part.Name) - } if err := filepath.Walk(srcPartPath, func(filePath string, info os.FileInfo, err error) error { if err != nil { return err diff --git a/pkg/metadata/metadata.go b/pkg/metadata/metadata.go index 7cade93d..ef84d2e4 100644 --- a/pkg/metadata/metadata.go +++ b/pkg/metadata/metadata.go @@ -65,14 +65,6 @@ type Part struct { Name string `json:"name"` Required bool `json:"required,omitempty"` RebalancedDisk string `json:"rebalanced_disk,omitempty"` - // @todo remove legacy backup fields - // Path string `json:"path"` // TODO: make it relative? look like useless now, can be calculated from Name - HashOfAllFiles string `json:"hash_of_all_files,omitempty"` // ??? - HashOfUncompressedFiles string `json:"hash_of_uncompressed_files,omitempty"` - UncompressedHashOfCompressedFiles string `json:"uncompressed_hash_of_compressed_files,omitempty"` // ??? - PartitionID string `json:"partition_id,omitempty"` - ModificationTime *time.Time `json:"modification_time,omitempty"` - Size int64 `json:"size,omitempty"` } type SplitPartFiles struct { diff --git a/pkg/server/server.go b/pkg/server/server.go index 6708d4e6..ec7c83c1 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -737,9 +737,6 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { } for _, item := range localBackups { description := item.DataFormat - if item.Legacy { - description = "old-format" - } if item.Broken != "" { description = item.Broken } @@ -769,9 +766,6 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { } for i, b := range remoteBackups { description := b.DataFormat - if b.Legacy { - description = "old-format" - } if b.Broken != "" { description = b.Broken brokenBackups++ diff --git a/pkg/storage/general.go b/pkg/storage/general.go index f0062875..9141c610 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -39,7 +39,6 @@ func (readerWrapper readerWrapperForContext) Read(p []byte) (n int, err error) { type Backup struct { metadata.BackupMetadata - Legacy bool FileExtension string Broken string UploadDate time.Time `json:"upload_date"` @@ -62,10 +61,6 @@ func (bd *BackupDestination) RemoveBackupRemote(ctx context.Context, backup Back if bd.Kind() == "SFTP" || bd.Kind() == "FTP" { return bd.DeleteFile(ctx, backup.BackupName) } - if backup.Legacy { - archiveName := fmt.Sprintf("%s.%s", backup.BackupName, backup.FileExtension) - return bd.DeleteFile(ctx, archiveName) - } return bd.Walk(ctx, backup.BackupName+"/", true, func(ctx context.Context, f RemoteFile) error { if bd.Kind() == "azblob" { if f.Size() > 0 || !f.LastModified().IsZero() { @@ -78,15 +73,6 @@ func (bd *BackupDestination) RemoveBackupRemote(ctx context.Context, backup Back }) } -func isLegacyBackup(backupName string) (bool, string, string) { - for _, suffix := range config.ArchiveExtensions { - if strings.HasSuffix(backupName, "."+suffix) { - return true, strings.TrimSuffix(backupName, "."+suffix), 
suffix - } - } - return false, backupName, "" -} - func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string]Backup, error) { listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) listCache := map[string]Backup{} @@ -180,20 +166,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, return nil, err } err = bd.Walk(ctx, "/", false, func(ctx context.Context, o RemoteFile) error { - // Legacy backup - if ok, backupName, fileExtension := isLegacyBackup(strings.TrimPrefix(o.Name(), "/")); ok { - result = append(result, Backup{ - metadata.BackupMetadata{ - BackupName: backupName, - DataSize: uint64(o.Size()), - }, - true, - fileExtension, - "", - o.LastModified(), - }) - return nil - } backupName := strings.Trim(o.Name(), "/") if !parseMetadata || (parseMetadataOnly != "" && parseMetadataOnly != backupName) { if cachedMetadata, isCached := listCache[backupName]; isCached { @@ -203,7 +175,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, BackupMetadata: metadata.BackupMetadata{ BackupName: backupName, }, - Legacy: false, }) } return nil @@ -218,7 +189,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, metadata.BackupMetadata{ BackupName: backupName, }, - false, "", "broken (can't stat metadata.json)", o.LastModified(), // folder @@ -232,7 +202,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, metadata.BackupMetadata{ BackupName: backupName, }, - false, "", "broken (can't open metadata.json)", o.LastModified(), // folder @@ -246,7 +215,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, metadata.BackupMetadata{ BackupName: backupName, }, - false, "", "broken (can't read metadata.json)", o.LastModified(), // folder @@ -263,7 +231,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, metadata.BackupMetadata{ BackupName: backupName, }, - false, "", "broken (bad metadata.json)", o.LastModified(), // folder @@ -271,9 +238,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, result = append(result, brokenBackup) return nil } - goodBackup := Backup{ - m, false, "", "", mf.LastModified(), - } + goodBackup := Backup{m, "", "", mf.LastModified()} listCache[backupName] = goodBackup result = append(result, goodBackup) return nil diff --git a/pkg/storage/utils_test.go b/pkg/storage/utils_test.go index f8c824e8..7795117c 100644 --- a/pkg/storage/utils_test.go +++ b/pkg/storage/utils_test.go @@ -19,15 +19,15 @@ func timeParse(s string) time.Time { func TestGetBackupsToDelete(t *testing.T) { testData := []Backup{ - {metadata.BackupMetadata{BackupName: "three"}, false, "", "", timeParse("2019-03-28T19-50-13")}, - {metadata.BackupMetadata{BackupName: "one"}, false, "", "", timeParse("2019-03-28T19-50-11")}, - {metadata.BackupMetadata{BackupName: "five"}, false, "", "", timeParse("2019-03-28T19-50-15")}, - {metadata.BackupMetadata{BackupName: "two"}, false, "", "", timeParse("2019-03-28T19-50-12")}, - {metadata.BackupMetadata{BackupName: "four"}, false, "", "", timeParse("2019-03-28T19-50-14")}, + {metadata.BackupMetadata{BackupName: "three"}, "", "", timeParse("2019-03-28T19-50-13")}, + {metadata.BackupMetadata{BackupName: "one"}, "", "", timeParse("2019-03-28T19-50-11")}, + {metadata.BackupMetadata{BackupName: "five"}, "", "", timeParse("2019-03-28T19-50-15")}, + {metadata.BackupMetadata{BackupName: "two"}, 
"", "", timeParse("2019-03-28T19-50-12")}, + {metadata.BackupMetadata{BackupName: "four"}, "", "", timeParse("2019-03-28T19-50-14")}, } expectedData := []Backup{ - {metadata.BackupMetadata{BackupName: "two"}, false, "", "", timeParse("2019-03-28T19-50-12")}, - {metadata.BackupMetadata{BackupName: "one"}, false, "", "", timeParse("2019-03-28T19-50-11")}, + {metadata.BackupMetadata{BackupName: "two"}, "", "", timeParse("2019-03-28T19-50-12")}, + {metadata.BackupMetadata{BackupName: "one"}, "", "", timeParse("2019-03-28T19-50-11")}, } assert.Equal(t, expectedData, GetBackupsToDeleteRemote(testData, 3)) assert.Equal(t, []Backup{}, GetBackupsToDeleteRemote([]Backup{testData[0]}, 3)) @@ -36,25 +36,25 @@ func TestGetBackupsToDelete(t *testing.T) { func TestGetBackupsToDeleteWithRequiredBackup(t *testing.T) { // fix https://github.com/Altinity/clickhouse-backup/issues/111 testData := []Backup{ - {metadata.BackupMetadata{BackupName: "3"}, false, "", "", timeParse("2019-03-28T19-50-13")}, - {metadata.BackupMetadata{BackupName: "1"}, false, "", "", timeParse("2019-03-28T19-50-11")}, - {metadata.BackupMetadata{BackupName: "5", RequiredBackup: "2"}, false, "", "", timeParse("2019-03-28T19-50-15")}, - {metadata.BackupMetadata{BackupName: "2"}, false, "", "", timeParse("2019-03-28T19-50-12")}, - {metadata.BackupMetadata{BackupName: "4", RequiredBackup: "3"}, false, "", "", timeParse("2019-03-28T19-50-14")}, + {metadata.BackupMetadata{BackupName: "3"}, "", "", timeParse("2019-03-28T19-50-13")}, + {metadata.BackupMetadata{BackupName: "1"}, "", "", timeParse("2019-03-28T19-50-11")}, + {metadata.BackupMetadata{BackupName: "5", RequiredBackup: "2"}, "", "", timeParse("2019-03-28T19-50-15")}, + {metadata.BackupMetadata{BackupName: "2"}, "", "", timeParse("2019-03-28T19-50-12")}, + {metadata.BackupMetadata{BackupName: "4", RequiredBackup: "3"}, "", "", timeParse("2019-03-28T19-50-14")}, } expectedData := []Backup{ - {metadata.BackupMetadata{BackupName: "1"}, false, "", "", timeParse("2019-03-28T19-50-11")}, + {metadata.BackupMetadata{BackupName: "1"}, "", "", timeParse("2019-03-28T19-50-11")}, } assert.Equal(t, expectedData, GetBackupsToDeleteRemote(testData, 3)) assert.Equal(t, []Backup{}, GetBackupsToDeleteRemote([]Backup{testData[0]}, 3)) // fix https://github.com/Altinity/clickhouse-backup/issues/385 testData = []Backup{ - {metadata.BackupMetadata{BackupName: "3", RequiredBackup: "2"}, false, "", "", timeParse("2019-03-28T19-50-13")}, - {metadata.BackupMetadata{BackupName: "1"}, false, "", "", timeParse("2019-03-28T19-50-11")}, - {metadata.BackupMetadata{BackupName: "5", RequiredBackup: "4"}, false, "", "", timeParse("2019-03-28T19-50-15")}, - {metadata.BackupMetadata{BackupName: "2", RequiredBackup: "1"}, false, "", "", timeParse("2019-03-28T19-50-12")}, - {metadata.BackupMetadata{BackupName: "4", RequiredBackup: "3"}, false, "", "", timeParse("2019-03-28T19-50-14")}, + {metadata.BackupMetadata{BackupName: "3", RequiredBackup: "2"}, "", "", timeParse("2019-03-28T19-50-13")}, + {metadata.BackupMetadata{BackupName: "1"}, "", "", timeParse("2019-03-28T19-50-11")}, + {metadata.BackupMetadata{BackupName: "5", RequiredBackup: "4"}, "", "", timeParse("2019-03-28T19-50-15")}, + {metadata.BackupMetadata{BackupName: "2", RequiredBackup: "1"}, "", "", timeParse("2019-03-28T19-50-12")}, + {metadata.BackupMetadata{BackupName: "4", RequiredBackup: "3"}, "", "", timeParse("2019-03-28T19-50-14")}, } expectedData = []Backup{} assert.Equal(t, expectedData, GetBackupsToDeleteRemote(testData, 3)) @@ -65,13 +65,13 @@ func 
TestGetBackupsToDeleteWithRequiredBackup(t *testing.T) { func TestGetBackupsToDeleteWithInvalidUploadDate(t *testing.T) { // fix https://github.com/Altinity/clickhouse-backup/issues/409 testData := []Backup{ - {metadata.BackupMetadata{BackupName: "1"}, false, "", "", timeParse("2022-03-03T18-08-01")}, - {metadata.BackupMetadata{BackupName: "2"}, false, "", "", timeParse("2022-03-03T18-08-02")}, - {BackupMetadata: metadata.BackupMetadata{BackupName: "3"}, Legacy: false, FileExtension: "", Broken: ""}, // UploadDate initialized with default value - {metadata.BackupMetadata{BackupName: "4"}, false, "", "", timeParse("2022-03-03T18-08-04")}, + {metadata.BackupMetadata{BackupName: "1"}, "", "", timeParse("2022-03-03T18-08-01")}, + {metadata.BackupMetadata{BackupName: "2"}, "", "", timeParse("2022-03-03T18-08-02")}, + {BackupMetadata: metadata.BackupMetadata{BackupName: "3"}, FileExtension: "", Broken: ""}, // UploadDate initialized with default value + {metadata.BackupMetadata{BackupName: "4"}, "", "", timeParse("2022-03-03T18-08-04")}, } expectedData := []Backup{ - {metadata.BackupMetadata{BackupName: "1"}, false, "", "", timeParse("2022-03-03T18-08-01")}, + {metadata.BackupMetadata{BackupName: "1"}, "", "", timeParse("2022-03-03T18-08-01")}, } assert.Equal(t, expectedData, GetBackupsToDeleteRemote(testData, 2)) @@ -80,48 +80,48 @@ func TestGetBackupsToDeleteWithInvalidUploadDate(t *testing.T) { func TestGetBackupsToDeleteWithRecursiveRequiredBackups(t *testing.T) { // fix https://github.com/Altinity/clickhouse-backup/issues/525 testData := []Backup{ - {metadata.BackupMetadata{BackupName: "2022-09-01T05-00-01"}, false, "", "", timeParse("2022-09-01T05-00-01")}, - {metadata.BackupMetadata{BackupName: "2022-09-01T21-00-03", RequiredBackup: "2022-09-01T05-00-01"}, false, "", "", timeParse("2022-09-01T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-02T21-00-02", RequiredBackup: "2022-09-01T21-00-03"}, false, "", "", timeParse("2022-09-02T21-00-02")}, - {metadata.BackupMetadata{BackupName: "2022-09-03T21-00-03", RequiredBackup: "2022-09-02T21-00-02"}, false, "", "", timeParse("2022-09-03T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-04T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, false, "", "", timeParse("2022-09-04T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-05T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, false, "", "", timeParse("2022-09-05T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-06T21-00-03", RequiredBackup: "2022-09-05T21-00-03"}, false, "", "", timeParse("2022-09-06T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-07T21-00-03", RequiredBackup: "2022-09-06T21-00-03"}, false, "", "", timeParse("2022-09-07T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-08T21-00-03", RequiredBackup: "2022-09-07T21-00-03"}, false, "", "", timeParse("2022-09-08T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-09T21-00-03", RequiredBackup: "2022-09-08T21-00-03"}, false, "", "", timeParse("2022-09-09T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-10T21-00-03", RequiredBackup: "2022-09-09T21-00-03"}, false, "", "", timeParse("2022-09-10T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-11T21-00-03", RequiredBackup: "2022-09-10T21-00-03"}, false, "", "", timeParse("2022-09-11T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-12T21-00-02", RequiredBackup: "2022-09-11T21-00-03"}, false, "", "", timeParse("2022-09-12T21-00-02")}, - {metadata.BackupMetadata{BackupName: 
"2022-09-13T21-00-03", RequiredBackup: "2022-09-12T21-00-02"}, false, "", "", timeParse("2022-09-13T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-14T21-00-03", RequiredBackup: "2022-09-13T21-00-03"}, false, "", "", timeParse("2022-09-14T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T09-30-20"}, false, "", "", timeParse("2022-10-03T09-30-20")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T09-39-37", RequiredBackup: "2022-10-03T09-30-20"}, false, "", "", timeParse("2022-10-03T09-39-37")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T09-40-03", RequiredBackup: "2022-10-03T09-39-37"}, false, "", "", timeParse("2022-10-03T09-40-03")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T09-41-31", RequiredBackup: "2022-10-03T09-40-03"}, false, "", "", timeParse("2022-10-03T09-41-31")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T09-52-12", RequiredBackup: "2022-10-03T09-41-31"}, false, "", "", timeParse("2022-10-03T09-52-12")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T10-11-15", RequiredBackup: "2022-10-03T09-52-12"}, false, "", "", timeParse("2022-10-03T10-11-15")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T10-12-38", RequiredBackup: "2022-10-03T10-11-15"}, false, "", "", timeParse("2022-10-03T10-12-38")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T10-12-57", RequiredBackup: "2022-10-03T10-12-38"}, false, "", "", timeParse("2022-10-03T10-12-57")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T10-13-16", RequiredBackup: "2022-10-03T10-12-57"}, false, "", "", timeParse("2022-10-03T10-13-16")}, - {metadata.BackupMetadata{BackupName: "2022-10-03T10-15-32", RequiredBackup: "2022-10-03T10-13-16"}, false, "", "", timeParse("2022-10-03T10-15-32")}, + {metadata.BackupMetadata{BackupName: "2022-09-01T05-00-01"}, "", "", timeParse("2022-09-01T05-00-01")}, + {metadata.BackupMetadata{BackupName: "2022-09-01T21-00-03", RequiredBackup: "2022-09-01T05-00-01"}, "", "", timeParse("2022-09-01T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-02T21-00-02", RequiredBackup: "2022-09-01T21-00-03"}, "", "", timeParse("2022-09-02T21-00-02")}, + {metadata.BackupMetadata{BackupName: "2022-09-03T21-00-03", RequiredBackup: "2022-09-02T21-00-02"}, "", "", timeParse("2022-09-03T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-04T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, "", "", timeParse("2022-09-04T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-05T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, "", "", timeParse("2022-09-05T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-06T21-00-03", RequiredBackup: "2022-09-05T21-00-03"}, "", "", timeParse("2022-09-06T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-07T21-00-03", RequiredBackup: "2022-09-06T21-00-03"}, "", "", timeParse("2022-09-07T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-08T21-00-03", RequiredBackup: "2022-09-07T21-00-03"}, "", "", timeParse("2022-09-08T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-09T21-00-03", RequiredBackup: "2022-09-08T21-00-03"}, "", "", timeParse("2022-09-09T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-10T21-00-03", RequiredBackup: "2022-09-09T21-00-03"}, "", "", timeParse("2022-09-10T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-11T21-00-03", RequiredBackup: "2022-09-10T21-00-03"}, "", "", timeParse("2022-09-11T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-12T21-00-02", RequiredBackup: "2022-09-11T21-00-03"}, "", 
"", timeParse("2022-09-12T21-00-02")}, + {metadata.BackupMetadata{BackupName: "2022-09-13T21-00-03", RequiredBackup: "2022-09-12T21-00-02"}, "", "", timeParse("2022-09-13T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-14T21-00-03", RequiredBackup: "2022-09-13T21-00-03"}, "", "", timeParse("2022-09-14T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T09-30-20"}, "", "", timeParse("2022-10-03T09-30-20")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T09-39-37", RequiredBackup: "2022-10-03T09-30-20"}, "", "", timeParse("2022-10-03T09-39-37")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T09-40-03", RequiredBackup: "2022-10-03T09-39-37"}, "", "", timeParse("2022-10-03T09-40-03")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T09-41-31", RequiredBackup: "2022-10-03T09-40-03"}, "", "", timeParse("2022-10-03T09-41-31")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T09-52-12", RequiredBackup: "2022-10-03T09-41-31"}, "", "", timeParse("2022-10-03T09-52-12")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T10-11-15", RequiredBackup: "2022-10-03T09-52-12"}, "", "", timeParse("2022-10-03T10-11-15")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T10-12-38", RequiredBackup: "2022-10-03T10-11-15"}, "", "", timeParse("2022-10-03T10-12-38")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T10-12-57", RequiredBackup: "2022-10-03T10-12-38"}, "", "", timeParse("2022-10-03T10-12-57")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T10-13-16", RequiredBackup: "2022-10-03T10-12-57"}, "", "", timeParse("2022-10-03T10-13-16")}, + {metadata.BackupMetadata{BackupName: "2022-10-03T10-15-32", RequiredBackup: "2022-10-03T10-13-16"}, "", "", timeParse("2022-10-03T10-15-32")}, } expectedData := []Backup{ - {metadata.BackupMetadata{BackupName: "2022-09-14T21-00-03", RequiredBackup: "2022-09-13T21-00-03"}, false, "", "", timeParse("2022-09-14T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-13T21-00-03", RequiredBackup: "2022-09-12T21-00-02"}, false, "", "", timeParse("2022-09-13T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-12T21-00-02", RequiredBackup: "2022-09-11T21-00-03"}, false, "", "", timeParse("2022-09-12T21-00-02")}, - {metadata.BackupMetadata{BackupName: "2022-09-11T21-00-03", RequiredBackup: "2022-09-10T21-00-03"}, false, "", "", timeParse("2022-09-11T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-10T21-00-03", RequiredBackup: "2022-09-09T21-00-03"}, false, "", "", timeParse("2022-09-10T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-09T21-00-03", RequiredBackup: "2022-09-08T21-00-03"}, false, "", "", timeParse("2022-09-09T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-08T21-00-03", RequiredBackup: "2022-09-07T21-00-03"}, false, "", "", timeParse("2022-09-08T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-07T21-00-03", RequiredBackup: "2022-09-06T21-00-03"}, false, "", "", timeParse("2022-09-07T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-06T21-00-03", RequiredBackup: "2022-09-05T21-00-03"}, false, "", "", timeParse("2022-09-06T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-05T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, false, "", "", timeParse("2022-09-05T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-04T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, false, "", "", timeParse("2022-09-04T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-03T21-00-03", RequiredBackup: "2022-09-02T21-00-02"}, false, "", 
"", timeParse("2022-09-03T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-02T21-00-02", RequiredBackup: "2022-09-01T21-00-03"}, false, "", "", timeParse("2022-09-02T21-00-02")}, - {metadata.BackupMetadata{BackupName: "2022-09-01T21-00-03", RequiredBackup: "2022-09-01T05-00-01"}, false, "", "", timeParse("2022-09-01T21-00-03")}, - {metadata.BackupMetadata{BackupName: "2022-09-01T05-00-01"}, false, "", "", timeParse("2022-09-01T05-00-01")}, + {metadata.BackupMetadata{BackupName: "2022-09-14T21-00-03", RequiredBackup: "2022-09-13T21-00-03"}, "", "", timeParse("2022-09-14T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-13T21-00-03", RequiredBackup: "2022-09-12T21-00-02"}, "", "", timeParse("2022-09-13T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-12T21-00-02", RequiredBackup: "2022-09-11T21-00-03"}, "", "", timeParse("2022-09-12T21-00-02")}, + {metadata.BackupMetadata{BackupName: "2022-09-11T21-00-03", RequiredBackup: "2022-09-10T21-00-03"}, "", "", timeParse("2022-09-11T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-10T21-00-03", RequiredBackup: "2022-09-09T21-00-03"}, "", "", timeParse("2022-09-10T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-09T21-00-03", RequiredBackup: "2022-09-08T21-00-03"}, "", "", timeParse("2022-09-09T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-08T21-00-03", RequiredBackup: "2022-09-07T21-00-03"}, "", "", timeParse("2022-09-08T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-07T21-00-03", RequiredBackup: "2022-09-06T21-00-03"}, "", "", timeParse("2022-09-07T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-06T21-00-03", RequiredBackup: "2022-09-05T21-00-03"}, "", "", timeParse("2022-09-06T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-05T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, "", "", timeParse("2022-09-05T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-04T21-00-03", RequiredBackup: "2022-09-04T21-00-03"}, "", "", timeParse("2022-09-04T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-03T21-00-03", RequiredBackup: "2022-09-02T21-00-02"}, "", "", timeParse("2022-09-03T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-02T21-00-02", RequiredBackup: "2022-09-01T21-00-03"}, "", "", timeParse("2022-09-02T21-00-02")}, + {metadata.BackupMetadata{BackupName: "2022-09-01T21-00-03", RequiredBackup: "2022-09-01T05-00-01"}, "", "", timeParse("2022-09-01T21-00-03")}, + {metadata.BackupMetadata{BackupName: "2022-09-01T05-00-01"}, "", "", timeParse("2022-09-01T05-00-01")}, } assert.Equal(t, expectedData, GetBackupsToDeleteRemote(testData, 6)) } diff --git a/test/integration/config-s3-embedded-local.yml b/test/integration/config-s3-embedded-local.yml new file mode 100644 index 00000000..99db54f6 --- /dev/null +++ b/test/integration/config-s3-embedded-local.yml @@ -0,0 +1,39 @@ +general: + disable_progress_bar: true + remote_storage: s3 + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + - "_temporary_and_external_tables.*" + restore_schema_on_cluster: "{cluster}" +clickhouse: + host: clickhouse + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 4h + restart_command: bash -c 'echo "FAKE RESTART"' + use_embedded_backup_restore: true + embedded_backup_disk: backups_local +s3: + access_key: access_key + secret_key: it_is_my_super_secret_key + bucket: 
clickhouse + endpoint: http://minio:9000 + acl: private + force_path_style: true + path: backup/{cluster}/{shard} + object_disk_path: object_disk/{cluster}/{shard} + disable_ssl: true + compression_format: none +api: + listen: :7171 + create_integration_tables: true + integration_tables_host: "clickhouse-backup" + allow_parallel: true diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index c28ccd5d..402e6605 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -198,6 +198,7 @@ services: - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml - ./config-s3-embedded-url.yml:/etc/clickhouse-backup/config-s3-embedded-url.yml + - ./config-s3-embedded-local.yml:/etc/clickhouse-backup/config-s3-embedded-local.yml - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 767ee544..e203ee50 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -265,6 +265,7 @@ services: - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml - ./config-s3-embedded-url.yml:/etc/clickhouse-backup/config-s3-embedded-url.yml + - ./config-s3-embedded-local.yml:/etc/clickhouse-backup/config-s3-embedded-local.yml - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh index ada3f6d5..569c6b73 100755 --- a/test/integration/dynamic_settings.sh +++ b/test/integration/dynamic_settings.sh @@ -205,6 +205,32 @@ EOT fi fi +# embedded local backup configuration +if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^22\.[6-9] || "${CLICKHOUSE_VERSION}" =~ ^22\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[3-9]\.[0-9]+ ]]; then + +mkdir -p /var/lib/clickhouse/disks/backups_local/ /var/lib/clickhouse/backups_embedded/ +chown -R clickhouse /var/lib/clickhouse/disks/ /var/lib/clickhouse/backups_embedded/ + +cat < /etc/clickhouse-server/config.d/backup_storage_configuration_local.xml + + + + + + local + /var/lib/clickhouse/disks/backups_local/ + + + + + backups_local + /var/lib/clickhouse/backups_embedded/ + + +EOT + +fi + # embedded s3 backup configuration if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^22\.[6-9] || "${CLICKHOUSE_VERSION}" =~ ^22\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[3-9]\.[0-9]+ ]]; then @@ -231,20 +257,21 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml + backups_local backups_s3 /var/lib/clickhouse/backups_embedded/ EOT -# zero replication is buggy -#cat < /etc/clickhouse-server/config.d/zero_copy_replication.xml -# -# -# 1 -# -# -#EOT +# zero replication is buggy, be carefull +cat < /etc/clickhouse-server/config.d/zero_copy_replication.xml + + + 1 + + +EOT cat < /etc/clickhouse-server/config.d/zookeeper_log.xml @@ -286,6 +313,7 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plai + backups_local backups_s3 
backups_s3_plain @@ -334,6 +362,7 @@ cat < /etc/clickhouse-server/config.d/storage_configuration_azblob.xml + backups_local backups_s3 backups_s3_plain backups_azure diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 6ccf349d..518e04f7 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2001,6 +2001,19 @@ func TestIntegrationEmbedded(t *testing.T) { } //t.Parallel() r := require.New(t) + if compareVersion(version, "24.3") >= 0 { + //CUSTOM backup create folder in each disk, need to clear + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/")) + runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") + } + //CUSTOM backup create folder in each disk, need to clear + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) + runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") + + //@TODO think about how to implements embedded backup for s3_plain disks + //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) + //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") + //@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053 //CUSTOM backup create folder in each disk //r.NoError(dockerExec("azure", "apk", "add", "tcpdump")) @@ -2014,19 +2027,12 @@ func TestIntegrationEmbedded(t *testing.T) { //r.NoError(dockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) if compareVersion(version, "23.8") >= 0 { - //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330 + //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 //installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") //r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml")) //runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } - //CUSTOM backup create folder in each disk - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") - - //@TODO think about how to implements embedded backup for s3_plain disks - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) - //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") } func TestRestoreDatabaseMapping(t *testing.T) { From d364d138b86443ecdc7f99cf8bd4e7f7e3ecfde4 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 6 Apr 2024 13:21:23 +0400 Subject: [PATCH 74/80] zerocopy replication, disable ALTER TABLE ... 
FREEZE in 24.3+ --- test/integration/dynamic_settings.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh index 569c6b73..23acd767 100755 --- a/test/integration/dynamic_settings.sh +++ b/test/integration/dynamic_settings.sh @@ -264,14 +264,14 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml EOT -# zero replication is buggy, be carefull -cat < /etc/clickhouse-server/config.d/zero_copy_replication.xml - - - 1 - - -EOT +# zero replication is buggy, can't freeze table: code: 344, message: FREEZE PARTITION queries are disabled. +#cat < /etc/clickhouse-server/config.d/zero_copy_replication.xml +# +# +# 1 +# +# +#EOT cat < /etc/clickhouse-server/config.d/zookeeper_log.xml From 120f70b883ee203d45119bda250bcdab952e8376 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 6 Apr 2024 16:25:41 +0400 Subject: [PATCH 75/80] fixed wrong list command behavior, it shall scan all system.disks paths, not only the default disk, to find partially created backups, fix https://github.com/Altinity/clickhouse-backup/issues/873 fixed create `--rbac` behavior, don't create access folder if no RBAC objects are present --- ChangeLog.md | 4 +- pkg/backup/create.go | 30 +++++++++---- pkg/backup/delete.go | 2 +- pkg/backup/list.go | 67 +++++++++++++++++++--------- pkg/keeper/keeper.go | 5 +++ test/integration/dynamic_settings.sh | 2 + test/integration/integration_test.go | 2 +- 7 files changed, 80 insertions(+), 32 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 6c93ffac..51598c78 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -29,7 +29,9 @@ BUG FIXES - fixed `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` - shall skip upload object disk content, fix [799](https://github.com/Altinity/clickhouse-backup/issues/799) - fixed connection to clickhouse-server behavior when long clickhouse-server startup time and `docker-entrypoint.d` processing, will infinite reconnect each 5 seconds, until success, fix [857](https://github.com/Altinity/clickhouse-backup/issues/857) - fixed `USE_EMBEDDED_BACKUP_RESTORE=true` behavior to allow use backup disk with type `local`, fix [882](https://github.com/Altinity/clickhouse-backup/issues/882) - +- fixed wrong list command behavior, it shall scan all system.disks paths, not only the default disk, to find partially created backups, fix [873](https://github.com/Altinity/clickhouse-backup/issues/873) +- fixed create `--rbac` behavior, don't create access folder if no RBAC objects are present +- # v2.4.35 IMPROVEMENTS - set part size for `s3:CopyObject` minimum 128Mb, look details https://repost.aws/questions/QUtW2_XaALTK63wv9XLSywiQ/s3-sync-command-is-slow-to-start-on-some-data diff --git a/pkg/backup/create.go b/pkg/backup/create.go index ac711c65..142728f1 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -144,7 +144,6 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, log.Errorf("creating failed -> b.Clean error: %v", cleanShadowErr) log.Error(cleanShadowErr.Error()) } - return err } @@ -623,21 +622,28 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk if err == nil && !accessPathInfo.IsDir() { return 0, fmt.Errorf("%s is not directory", accessPath) } - if err == nil { + if err != nil { + return 0, err + } + rbacSQLFiles, err := filepath.Glob(path.Join(accessPath, "*.sql")) + if err != nil { + return 0, err + } + if len(rbacSQLFiles) != 0 { log.Debugf("copy %s -> %s",
accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { - rbacDataSize += uint64(srcinfo.Size()) - return false, nil + if strings.HasSuffix(src, "*.sql") { + rbacDataSize += uint64(srcinfo.Size()) + return false, nil + } else { + return true, nil + } }, }) if copyErr != nil { return 0, copyErr } - } else { - if err = os.MkdirAll(rbacBackup, 0755); err != nil { - return 0, err - } } replicatedRBACDataSize, err := b.createBackupRBACReplicated(ctx, rbacBackup) if err != nil { @@ -663,6 +669,14 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st if err != nil { return 0, err } + rbacUUIDObjectsCount, err := k.ChildCount(replicatedAccessPath, "uuid") + if err != nil { + return 0, err + } + if rbacUUIDObjectsCount == 0 { + b.log.WithField("logger", "createBackupRBACReplicated").Warnf("%s/%s have no childs, skip Dump", replicatedAccessPath, "uuid") + continue + } dumpFile := path.Join(rbacBackup, userDirectory.Name+".jsonl") b.log.WithField("logger", "createBackupRBACReplicated").Infof("keeper.Dump %s -> %s", replicatedAccessPath, dumpFile) dumpRBACSize, dumpErr := k.Dump(replicatedAccessPath, dumpFile) diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 093d60c6..3cd45b86 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -156,7 +156,7 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis if disk.IsBackup { backupPath = path.Join(disk.Path, backupName) } - log.Debugf("remove '%s'", backupPath) + log.Infof("remove '%s'", backupPath) if err = os.RemoveAll(backupPath); err != nil { return err } diff --git a/pkg/backup/list.go b/pkg/backup/list.go index f1042ff6..1993adcc 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -184,23 +184,34 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) }, } } - defaultDataPath, err := b.ch.GetDefaultPath(disks) - if err != nil { - return nil, nil, err - } var result []LocalBackup - allBackupPaths := []string{path.Join(defaultDataPath, "backup")} - if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - for _, disk := range disks { - select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - default: - if disk.IsBackup || disk.Name == b.cfg.ClickHouse.EmbeddedBackupDisk { - allBackupPaths = append(allBackupPaths, disk.Path) - } + allBackupPaths := []string{} + for _, disk := range disks { + if disk.IsBackup || disk.Name == b.cfg.ClickHouse.EmbeddedBackupDisk { + allBackupPaths = append(allBackupPaths, disk.Path) + } else { + allBackupPaths = append(allBackupPaths, path.Join(disk.Path, "backup")) + } + } + addBrokenBackupIfNotExists := func(result []LocalBackup, name string, info os.FileInfo, broken string) []LocalBackup { + backupAlreadyExists := false + for _, backup := range result { + if backup.BackupName == name { + backupAlreadyExists = true + break } } + // add broken backup if not exists + if !backupAlreadyExists { + result = append(result, LocalBackup{ + BackupMetadata: metadata.BackupMetadata{ + BackupName: name, + CreationDate: info.ModTime(), + }, + Broken: broken, + }) + } + return result } l := len(allBackupPaths) for i, backupPath := range allBackupPaths { @@ -234,20 +245,34 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) backupMetadataBody, err := os.ReadFile(backupMetafilePath) if err != nil { if !os.IsNotExist(err) { - return result, disks, err + b.log.Warnf("list can't read %s 
error: %s", backupMetafilePath, err) } + result = addBrokenBackupIfNotExists(result, name, info, "broken metadata.json not found") continue } var backupMetadata metadata.BackupMetadata - if err := json.Unmarshal(backupMetadataBody, &backupMetadata); err != nil { - return nil, disks, err + if parseErr := json.Unmarshal(backupMetadataBody, &backupMetadata); parseErr != nil { + result = addBrokenBackupIfNotExists(result, name, info, fmt.Sprintf("parse metadata.json error: %v", parseErr)) + continue } - result = append(result, LocalBackup{ - BackupMetadata: backupMetadata, - }) + brokenBackupIsAlreadyExists := false + for i, backup := range result { + if backup.BackupName == backupMetadata.BackupName { + brokenBackupIsAlreadyExists = true + result[i].BackupMetadata = backupMetadata + result[i].Broken = "" + break + } + } + if !brokenBackupIsAlreadyExists { + result = append(result, LocalBackup{ + BackupMetadata: backupMetadata, + }) + } + } if closeErr := d.Close(); closeErr != nil { - log.Errorf("can't close %s openError: %v", backupPath, closeErr) + log.Errorf("can't close %s error: %v", backupPath, closeErr) } } } diff --git a/pkg/keeper/keeper.go b/pkg/keeper/keeper.go index 82a84b0a..20083732 100644 --- a/pkg/keeper/keeper.go +++ b/pkg/keeper/keeper.go @@ -131,6 +131,11 @@ func (k *Keeper) Dump(prefix, dumpFile string) (int, error) { return bytes, nil } +func (k *Keeper) ChildCount(prefix, nodePath string) (int, error) { + childrenNodes, _, err := k.conn.Children(path.Join(prefix, nodePath)) + return len(childrenNodes), err +} + func (k *Keeper) dumpNodeRecursive(prefix, nodePath string, f *os.File) (int, error) { value, _, err := k.conn.Get(path.Join(prefix, nodePath)) if err != nil { diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh index 23acd767..7a0cff0e 100755 --- a/test/integration/dynamic_settings.sh +++ b/test/integration/dynamic_settings.sh @@ -265,10 +265,12 @@ cat < /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml EOT # zero replication is buggy, can't freeze table: code: 344, message: FREEZE PARTITION queries are disabled. 
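
The `pkg/backup/list.go` hunk above drops the legacy-backup fallback: a directory whose metadata.json is missing or unparseable is now reported as a broken backup instead of aborting the whole listing, and the scan covers every path from system.disks. Below is a condensed, self-contained sketch of that probe logic; the helper name, struct fields and JSON tags are illustrative, not the project's actual types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// localBackup is a simplified stand-in for the project's LocalBackup type.
type localBackup struct {
	Name         string
	CreationDate time.Time
	Broken       string // non-empty when metadata.json is missing or unparseable
}

// probeBackupDir mirrors the new list behavior: a directory without a readable
// metadata.json is reported as broken instead of being treated as a legacy
// backup or failing the whole listing.
func probeBackupDir(backupRoot, name string) localBackup {
	info, err := os.Stat(filepath.Join(backupRoot, name))
	if err != nil {
		return localBackup{Name: name, Broken: fmt.Sprintf("stat error: %v", err)}
	}
	body, err := os.ReadFile(filepath.Join(backupRoot, name, "metadata.json"))
	if err != nil {
		return localBackup{Name: name, CreationDate: info.ModTime(), Broken: "broken (metadata.json not found)"}
	}
	var meta struct {
		BackupName   string    `json:"backup_name"`
		CreationDate time.Time `json:"creation_date"`
	}
	if err := json.Unmarshal(body, &meta); err != nil {
		return localBackup{Name: name, CreationDate: info.ModTime(), Broken: fmt.Sprintf("parse metadata.json error: %v", err)}
	}
	return localBackup{Name: meta.BackupName, CreationDate: meta.CreationDate}
}

func main() {
	fmt.Printf("%+v\n", probeBackupDir("/var/lib/clickhouse/backup", "mybackup"))
}
```

The real implementation additionally deduplicates results by backup name, since a partially created backup can leave footprints on several disks at once.
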
+# https://github.com/ClickHouse/ClickHouse/issues/62167#issuecomment-2031774983 #cat < /etc/clickhouse-server/config.d/zero_copy_replication.xml # # # 1 +# 0 # # #EOT diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 518e04f7..a4341361 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -468,7 +468,7 @@ func TestRBAC(t *testing.T) { creatAllRBAC := func(drop bool) { if drop { - log.Info("drop all RBAC related objects after backup") + log.Info("drop all RBAC related objects") ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") ch.queryWithNoError(r, "DROP QUOTA test_rbac") ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") From c64d10c971bb89674340e5a01fe2b5403f3be094 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 6 Apr 2024 17:00:02 +0400 Subject: [PATCH 76/80] TestS3NoDeletePermission for 19.17 --- pkg/backup/create.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 142728f1..d7ce8031 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -622,8 +622,8 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk if err == nil && !accessPathInfo.IsDir() { return 0, fmt.Errorf("%s is not directory", accessPath) } - if err != nil { - return 0, err + if os.IsNotExist(err) { + return 0, nil } rbacSQLFiles, err := filepath.Glob(path.Join(accessPath, "*.sql")) if err != nil { From 9192fddc3c91f10f58bf0dda37bce79e0ff15cce Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 6 Apr 2024 17:24:26 +0400 Subject: [PATCH 77/80] fix TestRBAC for 22.3 --- pkg/backup/create.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/backup/create.go b/pkg/backup/create.go index d7ce8031..9a5aae7d 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -677,6 +677,9 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st b.log.WithField("logger", "createBackupRBACReplicated").Warnf("%s/%s have no childs, skip Dump", replicatedAccessPath, "uuid") continue } + if err = os.MkdirAll(rbacBackup, 0755); err != nil { + return 0, err + } dumpFile := path.Join(rbacBackup, userDirectory.Name+".jsonl") b.log.WithField("logger", "createBackupRBACReplicated").Infof("keeper.Dump %s -> %s", replicatedAccessPath, dumpFile) dumpRBACSize, dumpErr := k.Dump(replicatedAccessPath, dumpFile) From 3bd4fd14087fc03b7a7103972473e6b79ef3c03e Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 7 Apr 2024 21:42:36 +0500 Subject: [PATCH 78/80] fix testflows RBAC related tests --- pkg/backup/create.go | 7 +++++-- pkg/backup/restore.go | 4 ++-- .../clickhouse_backup/docker-compose/docker-compose.yml | 1 + 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 9a5aae7d..3c8d0edc 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -632,8 +632,11 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk if len(rbacSQLFiles) != 0 { log.Debugf("copy %s -> %s", accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ - Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { - if strings.HasSuffix(src, "*.sql") { + OnDirExists: func(src, dst string) recursiveCopy.DirExistsAction { + return recursiveCopy.Replace + }, + Skip: func(srcinfo os.FileInfo, src, dst string) (bool, error) { + if strings.HasSuffix(src, ".sql") { rbacDataSize += 
uint64(srcinfo.Size()) return false, nil } else { return true, nil } }, }) if copyErr != nil { return 0, copyErr } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 020477ac..684c70ab 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -757,10 +757,10 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat } log.Debugf("copy %s -> %s", srcBackupDir, destinationDir) copyOptions := recursiveCopy.Options{ - OnDirExists: func(src, dest string) recursiveCopy.DirExistsAction { + OnDirExists: func(src, dst string) recursiveCopy.DirExistsAction { return recursiveCopy.Merge }, - Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { + Skip: func(srcinfo os.FileInfo, src, dst string) (bool, error) { for _, pattern := range skipPatterns { if matched, matchErr := filepath.Match(pattern, filepath.Base(src)); matchErr != nil || matched { return true, matchErr diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index ef32272e..1817c130 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -112,6 +112,7 @@ services: - "7171:7171" environment: - DEBIAN_FRONTEND=noninteractive + - LOG_LEVEL=${LOG_LEVEL:-info} - GCS_CREDENTIALS_JSON=${QA_GCS_CRED_JSON:-} - GCS_CREDENTIALS_JSON_ENCODED=${QA_GCS_CRED_JSON_ENCODED:-} - CLICKHOUSE_HOST=clickhouse1 From cfc48e54ce1a4d968b0701ac501c14b8f1413af6 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 7 Apr 2024 22:24:12 +0500 Subject: [PATCH 79/80] checked behavior when `system.disks` contains a disk which is not present in any `storage_policies`, fix https://github.com/Altinity/clickhouse-backup/issues/845 --- ChangeLog.md | 2 +- pkg/clickhouse/clickhouse.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ChangeLog.md b/ChangeLog.md index 51598c78..6cfe3319 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -31,7 +31,7 @@ BUG FIXES - fixed `USE_EMBEDDED_BACKUP_RESTORE=true` behavior to allow use backup disk with type `local`, fix [882](https://github.com/Altinity/clickhouse-backup/issues/882) - fixed wrong list command behavior, it shall scan all system.disks paths, not only the default disk, to find partially created backups, fix [873](https://github.com/Altinity/clickhouse-backup/issues/873) - fixed create `--rbac` behavior, don't create access folder if no RBAC objects are present -- +- fixed behavior when `system.disks` contains a disk which is not present in any `storage_policies`, fix [845](https://github.com/Altinity/clickhouse-backup/issues/845) # v2.4.35 IMPROVEMENTS - set part size for `s3:CopyObject` minimum 128Mb, look details https://repost.aws/questions/QUtW2_XaALTK63wv9XLSywiQ/s3-sync-command-is-slow-to-start-on-some-data diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 1e1e0072..442b2e8e 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -289,6 +289,7 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro joinStoragePoliciesSQL := "" if len(diskFields) > 0 && diskFields[0].StoragePolicyPresent > 0 { storagePoliciesSQL = "groupUniqArray(s.policy_name)" + // LEFT JOIN to allow disks which have no policy, https://github.com/Altinity/clickhouse-backup/issues/845 joinStoragePoliciesSQL = " LEFT JOIN " joinStoragePoliciesSQL += "(SELECT policy_name, arrayJoin(disks) AS disk FROM system.storage_policies) AS s ON s.disk = d.name" } From 67d2675fbf55e5bb39aa95999a999f79526c6b25
Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 7 Apr 2024 23:05:18 +0500 Subject: [PATCH 80/80] added `clickhouse_backup_in_progress_commands` metric, fix https://github.com/Altinity/clickhouse-backup/issues/836 --- ChangeLog.md | 4 +- go.mod | 1 - go.sum | 155 +--------------------------------- pkg/server/metrics/metrics.go | 12 +++ pkg/server/server.go | 1 - 5 files changed, 17 insertions(+), 156 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 6cfe3319..d2718e0a 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,4 +1,4 @@ -# v2.5.0 (not released yet) +# v2.5.0 IMPROVEMENTS - complete removed support for legacy backups, created with version prior v1.0 - removed `disable_progress_bar` config option and related progress bar code @@ -14,6 +14,7 @@ IMPROVEMENTS - added `rbac_always_backup: true` option to default config, will create backup for RBAC objects automatically, restore still require `--rbac` to avoid destructive actions, fix [793](https://github.com/Altinity/clickhouse-backup/issues/793) - added `rbac_conflict_resolution: recreate` option for RBAC object name conflicts during restore, fix [851](https://github.com/Altinity/clickhouse-backup/issues/851) - added `upload_max_bytes_per_seconds` and `download_max_bytes_per_seconds` config options to allow throttling without CAP_SYS_NICE, fix [817](https://github.com/Altinity/clickhouse-backup/issues/817) +- added `clickhouse_backup_in_progress_commands` metric, fix [836](https://github.com/Altinity/clickhouse-backup/issues/836) - switched to golang 1.22 - updated all third-party SDK to latest versions - added `clickhouse/clickhouse-server:24.3` to CI/CD @@ -32,6 +33,7 @@ BUG FIXES - fixed wrong list command behavior, it shall scann all system.disks path not only default disk to find pratially created backups, fix [873](https://github.com/Altinity/clickhouse-backup/issues/873) - fixed create `--rbac` behavior, don't create access folder if no RBAC objects is present - fixed behavior when `system.disks` contains disk which not present in any `storage_policies`, fix [845](https://github.com/Altinity/clickhouse-backup/issues/845) + # v2.4.35 IMPROVEMENTS - set part size for `s3:CopyObject` minimum 128Mb, look details https://repost.aws/questions/QUtW2_XaALTK63wv9XLSywiQ/s3-sync-command-is-slow-to-start-on-some-data diff --git a/go.mod b/go.mod index 7b0c445c..1b7e94b9 100644 --- a/go.mod +++ b/go.mod @@ -126,7 +126,6 @@ require ( golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect diff --git a/go.sum b/go.sum index 29040bd2..5afcca23 100644 --- a/go.sum +++ b/go.sum @@ -7,33 +7,21 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go 
v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -61,13 +49,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ClickHouse/ch-go v0.61.1 h1:j5rx3qnvcnYjhnP1IdXE/vdIRQiqgwAzyqOaasA6QCw= -github.com/ClickHouse/ch-go v0.61.1/go.mod h1:myxt/JZgy2BYHFGQqzmaIpbfr5CMbs3YHVULaWQj5YU= github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4= github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg= -github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= -github.com/ClickHouse/clickhouse-go/v2 v2.17.1 h1:ZCmAYWpu75IyEi7+Yrs/uaAjiCGY5wfW5kXo64exkX4= -github.com/ClickHouse/clickhouse-go/v2 v2.17.1/go.mod h1:rkGTvFDTLqLIm0ma+13xmcCfr/08Gvs7KmFt1tgiWHQ= github.com/ClickHouse/clickhouse-go/v2 v2.23.0 h1:srmRrkS0BR8gEut87u8jpcZ7geOob6nGj9ifrb+aKmg= github.com/ClickHouse/clickhouse-go/v2 
v2.23.0/go.mod h1:tBhdF3f3RdP7sS59+oBAtTyhWpy0024ZxDMhgxra0QE= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= @@ -76,8 +59,6 @@ github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer5 github.com/antchfx/xmlquery v1.3.18 h1:FSQ3wMuphnPPGJOFhvc+cRQ2CT/rUj4cyQXkJcjOwz0= github.com/antchfx/xmlquery v1.3.18/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= -github.com/antchfx/xpath v1.2.5 h1:hqZ+wtQ+KIOV/S3bGZcIhpgYC26um2bZYP2KVGcR7VY= -github.com/antchfx/xpath v1.2.5/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antchfx/xpath v1.3.0 h1:nTMlzGAK3IJ0bPpME2urTuFL76o4A96iYvoKFHRXJgc= github.com/antchfx/xpath v1.3.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= @@ -86,80 +67,42 @@ github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDw github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= -github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= github.com/aws/aws-sdk-go-v2/config v1.27.10 h1:PS+65jThT0T/snC5WjyfHHyUgG+eBoupSDV+f838cro= github.com/aws/aws-sdk-go-v2/config v1.27.10/go.mod h1:BePM7Vo4OBpHreKRUMuDXX+/+JWP38FLkzl5m27/Jjs= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= github.com/aws/aws-sdk-go-v2/credentials v1.17.10 h1:qDZ3EA2lv1KangvQB6y258OssCHD0xvaGiEDkG4X/10= github.com/aws/aws-sdk-go-v2/credentials v1.17.10/go.mod h1:6t3sucOaYDwDssHQa0ojH1RpmVmF5/jArkye1b2FKMI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager 
v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.14 h1:Nhcq+ODoD9FRQYI3lATy6iADS5maER3ZXSfE8v3FMh8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.14/go.mod h1:VlBbwTpgCj3rKWMVkEAYiAR3FKs7Mi3jALTMGfbfuns= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE= 
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU= github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 h1:WzFol5Cd+yDxPAdnzTA5LmpHYSWinhmSj4rQChV0ee8= github.com/aws/aws-sdk-go-v2/service/sso v1.20.4/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= @@ -167,15 +110,11 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= -github.com/bodgit/sevenzip v1.4.5 h1:HFJQ+nbjppfyf2xbQEJBbmVo+o2kTg1FXV4i7YOx87s= -github.com/bodgit/sevenzip v1.4.5/go.mod h1:LAcAg/UQzyjzCQSGBPZFYzoiHMfT6Gk+3tMSjUk3foY= github.com/bodgit/sevenzip v1.5.0 h1:QESwnPUnhqftOgbi6wIiWm1WEkrT4puHukt5a2psEcw= github.com/bodgit/sevenzip v1.5.0/go.mod h1:+E74G6pfBX8IMaVybsKMgGTTTBcbHU8ssPTJ9mLUr38= github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -185,12 +124,7 @@ github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -205,17 +139,12 @@ github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmW github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/eapache/go-resiliency v1.5.0 h1:dRsaR00whmQD+SgVKlq/vCRFNgtEb5yppyeVos3Yce0= -github.com/eapache/go-resiliency v1.5.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -265,9 +194,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -309,8 +235,6 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= @@ -343,8 +267,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -388,8 +310,6 @@ github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/paulmach/orb v0.11.0 h1:JfVXJUBeH9ifc/OrhBY0lL16QsmPgpCHMlqSSYhcgAA= -github.com/paulmach/orb v0.11.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= @@ -402,21 +322,13 @@ github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= @@ -448,14 +360,11 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= -github.com/tencentyun/cos-go-sdk-v5 v0.7.45 h1:5/ZGOv846tP6+2X7w//8QjLgH2KcUK+HciFbfjWquFU= -github.com/tencentyun/cos-go-sdk-v5 v0.7.45/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE= github.com/tencentyun/cos-go-sdk-v5 v0.7.47 h1:uoS4Sob16qEYoapkqJq1D1Vnsy9ira9BfNUMtoFYTI4= github.com/tencentyun/cos-go-sdk-v5 v0.7.47/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= @@ -469,8 +378,6 @@ github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eN github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= 
-github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= @@ -493,28 +400,16 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= @@ -532,10 +427,6 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -592,12 +483,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -605,10 +490,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -620,8 +501,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= 
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -647,20 +526,14 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -668,7 +541,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -718,10 +590,6 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= -google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= -google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= google.golang.org/api v0.172.0/go.mod 
h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -729,8 +597,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -745,22 +611,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= -google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda 
h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -772,10 +626,6 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -788,7 +638,6 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/pkg/server/metrics/metrics.go b/pkg/server/metrics/metrics.go index a72b8b36..5809c414 100644 --- a/pkg/server/metrics/metrics.go +++ b/pkg/server/metrics/metrics.go @@ -30,6 +30,7 @@ type APIMetrics struct { NumberBackupsLocal prometheus.Gauge NumberBackupsRemoteExpected prometheus.Gauge NumberBackupsLocalExpected prometheus.Gauge + InProgressCommands prometheus.Gauge SubCommands map[string][]string log *apexLog.Entry @@ -101,6 +102,7 @@ func (m *APIMetrics) RegisterMetrics() { Name: "last_backup_size_local", Help: "Last local backup size in bytes", }) + m.LastBackupSizeRemote = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "clickhouse_backup", Name: "last_backup_size_remote", @@ -137,6 +139,12 @@ func (m *APIMetrics) RegisterMetrics() { Help: "How many backups expected on local storage", }) + m.InProgressCommands = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "clickhouse_backup", + Name: "in_progress_commands", + Help: "How many commands running in progress", + }) + for _, command := range commandList { prometheus.MustRegister( m.SuccessfulCounter[command], @@ -155,6 +163,7 @@ func (m *APIMetrics) RegisterMetrics() { m.NumberBackupsLocal, m.NumberBackupsRemoteExpected, m.NumberBackupsLocalExpected, + m.InProgressCommands, ) for _, command := range commandList { @@ -165,6 +174,7 @@ func (m *APIMetrics) RegisterMetrics() { func (m *APIMetrics) Start(command string, startTime time.Time) { if _, exists := m.LastStart[command]; exists { m.LastStart[command].Set(float64(startTime.Unix())) + 
m.InProgressCommands.Inc() if subCommands, subCommandsExists := m.SubCommands[command]; subCommandsExists { for _, subCommand := range subCommands { if _, exists := m.LastStart[subCommand]; exists { @@ -176,10 +186,12 @@ func (m *APIMetrics) Start(command string, startTime time.Time) { m.log.Warnf("%s not found in LastStart metrics", command) } } + func (m *APIMetrics) Finish(command string, startTime time.Time) { if _, exists := m.LastFinish[command]; exists { m.LastDuration[command].Set(float64(time.Since(startTime).Nanoseconds())) m.LastFinish[command].Set(float64(time.Now().Unix())) + m.InProgressCommands.Dec() if subCommands, subCommandsExists := m.SubCommands[command]; subCommandsExists { for _, subCommand := range subCommands { if _, exists := m.LastFinish[subCommand]; exists { diff --git a/pkg/server/server.go b/pkg/server/server.go index ec7c83c1..d7ca26ef 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -560,7 +560,6 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti defer status.Current.Stop(commandId, err) if err != nil { api.log.Errorf("Watch error: %v", err) - return } }()
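
The last patch above wires a single Prometheus gauge, `clickhouse_backup_in_progress_commands`, that is incremented in `APIMetrics.Start` and decremented in `APIMetrics.Finish`, so its value always equals the number of commands currently running. The following is a minimal standalone sketch of that pattern, not the project's actual metrics wiring: the `startCommand`/`finishCommand` helpers and the `:2112` listen address are hypothetical, introduced only to show the Inc/Dec pairing around a command's lifetime; the gauge namespace, name, and help text match the ones added in `pkg/server/metrics/metrics.go`.

```go
// Sketch of an "in progress commands" gauge: Inc() when a command starts,
// Dec() when it finishes, exposed on /metrics for scraping.
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var inProgressCommands = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "clickhouse_backup",
	Name:      "in_progress_commands",
	Help:      "How many commands running in progress",
})

func init() {
	prometheus.MustRegister(inProgressCommands)
}

// startCommand and finishCommand are hypothetical helpers; in the real code
// the Inc/Dec calls live inside APIMetrics.Start and APIMetrics.Finish.
func startCommand(name string) {
	inProgressCommands.Inc()
	fmt.Println("started:", name)
}

func finishCommand(name string) {
	inProgressCommands.Dec()
	fmt.Println("finished:", name)
}

func main() {
	go func() {
		startCommand("create")
		defer finishCommand("create")
		time.Sleep(30 * time.Second) // simulate a long-running backup command
	}()

	// While the simulated command runs, a scrape of /metrics reports
	// clickhouse_backup_in_progress_commands 1; afterwards it returns to 0.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil)
}
```

Pairing the `Dec()` with a `defer` (or, as in the patch, placing it in the shared `Finish` path) keeps the gauge from drifting when a command exits early on error.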