fixed wrong `list` command behavior: it shall scan all `system.disks` paths, not only the default disk, to find partially created backups, fix #873

fixed `create --rbac` behavior: don't create the access folder if no RBAC objects are present
Slach committed Apr 6, 2024
1 parent d364d13 commit 120f70b
Showing 7 changed files with 80 additions and 32 deletions.
4 changes: 3 additions & 1 deletion ChangeLog.md
@@ -29,7 +29,9 @@ BUG FIXES
- fixed `ObjectDisks` + `CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE: true` behavior to skip uploading object disk content, fix [799](https://github.com/Altinity/clickhouse-backup/issues/799)
- fixed connection behavior when clickhouse-server startup takes a long time due to `docker-entrypoint.d` processing: reconnect every 5 seconds until success, fix [857](https://github.com/Altinity/clickhouse-backup/issues/857)
- fixed `USE_EMBEDDED_BACKUP_RESTORE=true` behavior to allow using a backup disk with type `local`, fix [882](https://github.com/Altinity/clickhouse-backup/issues/882)
- fixed wrong `list` command behavior, it shall scan all `system.disks` paths, not only the default disk, to find partially created backups, fix [873](https://github.com/Altinity/clickhouse-backup/issues/873)
- fixed `create --rbac` behavior, don't create the access folder if no RBAC objects are present

# v2.4.35
IMPROVEMENTS
- set minimum part size for `s3:CopyObject` to 128MB, see details at https://repost.aws/questions/QUtW2_XaALTK63wv9XLSywiQ/s3-sync-command-is-slow-to-start-on-some-data
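The two new entries above land in the diffs below. As a quick illustration of the `list` fix, here is a minimal, hypothetical sketch of the disk-scanning rule — a simplified `Disk` struct stands in for `clickhouse.Disk`, and the embedded-backup disk name comes from the backuper config in the real code:

```go
package main

import (
	"fmt"
	"path"
)

// Disk is a simplified stand-in for clickhouse.Disk rows read from system.disks.
type Disk struct {
	Name     string
	Path     string
	IsBackup bool
}

// candidateBackupPaths mirrors the fixed behavior: every disk contributes a
// directory to scan, not just the default disk. Backup-type disks are scanned
// at their root; regular disks under <disk path>/backup.
func candidateBackupPaths(disks []Disk, embeddedBackupDisk string) []string {
	paths := make([]string, 0, len(disks))
	for _, d := range disks {
		if d.IsBackup || d.Name == embeddedBackupDisk {
			paths = append(paths, d.Path)
		} else {
			paths = append(paths, path.Join(d.Path, "backup"))
		}
	}
	return paths
}

func main() {
	disks := []Disk{
		{Name: "default", Path: "/var/lib/clickhouse"},
		{Name: "backups_s3", Path: "/var/lib/clickhouse/disks/backups_s3", IsBackup: true},
	}
	fmt.Println(candidateBackupPaths(disks, ""))
	// [/var/lib/clickhouse/backup /var/lib/clickhouse/disks/backups_s3]
}
```

Backup-type disks are scanned at their root because embedded backups live directly there; every other disk contributes its `backup` subdirectory, which is where partially created backups end up.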
30 changes: 22 additions & 8 deletions pkg/backup/create.go
@@ -144,7 +144,6 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string,
log.Errorf("creating failed -> b.Clean error: %v", cleanShadowErr)
log.Error(cleanShadowErr.Error())
}

return err
}

@@ -623,21 +622,28 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk
if err == nil && !accessPathInfo.IsDir() {
return 0, fmt.Errorf("%s is not directory", accessPath)
}
if err != nil {
return 0, err
}
rbacSQLFiles, err := filepath.Glob(path.Join(accessPath, "*.sql"))
if err != nil {
return 0, err
}
if len(rbacSQLFiles) != 0 {
log.Debugf("copy %s -> %s", accessPath, rbacBackup)
copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{
Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) {
if strings.HasSuffix(src, ".sql") {
rbacDataSize += uint64(srcinfo.Size())
return false, nil
} else {
return true, nil
}
},
})
if copyErr != nil {
return 0, copyErr
}
}
replicatedRBACDataSize, err := b.createBackupRBACReplicated(ctx, rbacBackup)
if err != nil {
@@ -663,6 +669,14 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st
if err != nil {
return 0, err
}
rbacUUIDObjectsCount, err := k.ChildCount(replicatedAccessPath, "uuid")
if err != nil {
return 0, err
}
if rbacUUIDObjectsCount == 0 {
b.log.WithField("logger", "createBackupRBACReplicated").Warnf("%s/%s have no childs, skip Dump", replicatedAccessPath, "uuid")
continue
}
dumpFile := path.Join(rbacBackup, userDirectory.Name+".jsonl")
b.log.WithField("logger", "createBackupRBACReplicated").Infof("keeper.Dump %s -> %s", replicatedAccessPath, dumpFile)
dumpRBACSize, dumpErr := k.Dump(replicatedAccessPath, dumpFile)
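The `--rbac` half of the fix hinges on the copy helper's `Skip` callback: only `*.sql` files are copied and counted, and when the glob finds none, the `access` folder is never created. A standalone sketch under the assumption that `recursiveCopy` aliases github.com/otiai10/copy (whose `Options.Skip` matches the signature shown above); `copyRBACSQL` is a hypothetical helper name, not package API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	recursiveCopy "github.com/otiai10/copy" // assumed to be the alias used in create.go
)

// copyRBACSQL copies only *.sql files from accessPath into rbacBackup and
// returns their accumulated size. When no *.sql files exist, it returns
// early and rbacBackup is never created.
func copyRBACSQL(accessPath, rbacBackup string) (uint64, error) {
	rbacSQLFiles, err := filepath.Glob(filepath.Join(accessPath, "*.sql"))
	if err != nil {
		return 0, err
	}
	if len(rbacSQLFiles) == 0 {
		return 0, nil // nothing to back up: leave the access folder uncreated
	}
	var rbacDataSize uint64
	err = recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{
		Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) {
			if strings.HasSuffix(src, ".sql") {
				rbacDataSize += uint64(srcinfo.Size()) // copy and count RBAC DDL files
				return false, nil
			}
			return true, nil // skip everything else
		},
	})
	return rbacDataSize, err
}

func main() {
	size, err := copyRBACSQL("/var/lib/clickhouse/access", "/tmp/backup/access")
	fmt.Println(size, err)
}
```

Counting sizes inside `Skip` avoids a second walk over the tree: the callback fires once per entry anyway, so the copy and the size accounting share one traversal.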
2 changes: 1 addition & 1 deletion pkg/backup/delete.go
@@ -156,7 +156,7 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis
if disk.IsBackup {
backupPath = path.Join(disk.Path, backupName)
}
log.Debugf("remove '%s'", backupPath)
log.Infof("remove '%s'", backupPath)
if err = os.RemoveAll(backupPath); err != nil {
return err
}
67 changes: 46 additions & 21 deletions pkg/backup/list.go
@@ -184,23 +184,34 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk)
},
}
}
var result []LocalBackup
allBackupPaths := []string{}
for _, disk := range disks {
if disk.IsBackup || disk.Name == b.cfg.ClickHouse.EmbeddedBackupDisk {
allBackupPaths = append(allBackupPaths, disk.Path)
} else {
allBackupPaths = append(allBackupPaths, path.Join(disk.Path, "backup"))
}
}
addBrokenBackupIfNotExists := func(result []LocalBackup, name string, info os.FileInfo, broken string) []LocalBackup {
backupAlreadyExists := false
for _, backup := range result {
if backup.BackupName == name {
backupAlreadyExists = true
break
}
}
// add broken backup if not exists
if !backupAlreadyExists {
result = append(result, LocalBackup{
BackupMetadata: metadata.BackupMetadata{
BackupName: name,
CreationDate: info.ModTime(),
},
Broken: broken,
})
}
return result
}
l := len(allBackupPaths)
for i, backupPath := range allBackupPaths {
@@ -234,20 +245,34 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk)
backupMetadataBody, err := os.ReadFile(backupMetafilePath)
if err != nil {
if !os.IsNotExist(err) {
b.log.Warnf("list can't read %s error: %s", backupMetafilePath, err)
}
result = addBrokenBackupIfNotExists(result, name, info, "broken metadata.json not found")
continue
}
var backupMetadata metadata.BackupMetadata
if parseErr := json.Unmarshal(backupMetadataBody, &backupMetadata); parseErr != nil {
result = addBrokenBackupIfNotExists(result, name, info, fmt.Sprintf("parse metadata.json error: %v", parseErr))
continue
}
brokenBackupIsAlreadyExists := false
for i, backup := range result {
if backup.BackupName == backupMetadata.BackupName {
brokenBackupIsAlreadyExists = true
result[i].BackupMetadata = backupMetadata
result[i].Broken = ""
break
}
}
if !brokenBackupIsAlreadyExists {
result = append(result, LocalBackup{
BackupMetadata: backupMetadata,
})
}

}
if closeErr := d.Close(); closeErr != nil {
log.Errorf("can't close %s openError: %v", backupPath, closeErr)
log.Errorf("can't close %s error: %v", backupPath, closeErr)
}
}
}
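The listing rework above stops aborting on unreadable or unparsable metadata.json: such backups are recorded as `Broken`, and a broken placeholder is upgraded in place when valid metadata for the same name turns up on another disk. A condensed sketch of that merge rule, using a hypothetical `backupEntry` type in place of `LocalBackup`:

```go
package main

import "fmt"

type backupEntry struct {
	Name   string
	Broken string // non-empty marks a partially created backup
}

// upsert mirrors the merge rule in GetLocalBackups: a broken entry is only
// added if the name is unseen, and a valid entry overwrites a broken one.
func upsert(list []backupEntry, e backupEntry) []backupEntry {
	for i := range list {
		if list[i].Name == e.Name {
			if e.Broken == "" {
				list[i] = e // valid metadata wins over a broken placeholder
			}
			return list
		}
	}
	return append(list, e)
}

func main() {
	var backups []backupEntry
	backups = upsert(backups, backupEntry{Name: "b1", Broken: "broken metadata.json not found"})
	backups = upsert(backups, backupEntry{Name: "b1"}) // same backup found complete on another disk
	fmt.Printf("%+v\n", backups)                       // [{Name:b1 Broken:}]
}
```

This is what makes the multi-disk scan safe: the same backup name can legitimately appear under several `system.disks` paths, some partial, without producing duplicate or falsely broken list entries.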
5 changes: 5 additions & 0 deletions pkg/keeper/keeper.go
@@ -131,6 +131,11 @@ func (k *Keeper) Dump(prefix, dumpFile string) (int, error) {
return bytes, nil
}

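// ChildCount returns the number of direct children of nodePath under prefix in (Zoo)Keeper.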
func (k *Keeper) ChildCount(prefix, nodePath string) (int, error) {
childrenNodes, _, err := k.conn.Children(path.Join(prefix, nodePath))
return len(childrenNodes), err
}

func (k *Keeper) dumpNodeRecursive(prefix, nodePath string, f *os.File) (int, error) {
value, _, err := k.conn.Get(path.Join(prefix, nodePath))
if err != nil {
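`ChildCount` lets callers probe a znode before dumping it, which is how `createBackupRBACReplicated` now avoids writing empty dump files. A sketch of that guard against an interface capturing the two `Keeper` methods used here; the function and its name are illustrative, not part of the package API:

```go
package keeperutil

// keeperClient captures the two Keeper methods used below;
// *keeper.Keeper from pkg/keeper satisfies it.
type keeperClient interface {
	ChildCount(prefix, nodePath string) (int, error)
	Dump(prefix, dumpFile string) (int, error)
}

// dumpReplicatedRBAC dumps replicated RBAC objects only when the uuid node
// actually has children, mirroring the new guard in create.go.
func dumpReplicatedRBAC(k keeperClient, replicatedAccessPath, dumpFile string) (int, error) {
	count, err := k.ChildCount(replicatedAccessPath, "uuid")
	if err != nil {
		return 0, err
	}
	if count == 0 {
		return 0, nil // no replicated RBAC objects: skip Dump entirely
	}
	return k.Dump(replicatedAccessPath, dumpFile)
}
```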
2 changes: 2 additions & 0 deletions test/integration/dynamic_settings.sh
@@ -265,10 +265,12 @@ cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml
EOT

# zero-copy replication is buggy, can't freeze table: code: 344, message: FREEZE PARTITION queries are disabled.
# https://github.com/ClickHouse/ClickHouse/issues/62167#issuecomment-2031774983
#cat <<EOT > /etc/clickhouse-server/config.d/zero_copy_replication.xml
#<yandex>
# <merge_tree>
# <allow_remote_fs_zero_copy_replication>1</allow_remote_fs_zero_copy_replication>
# <disable_freeze_partition_for_zero_copy_replication>0</disable_freeze_partition_for_zero_copy_replication>
# </merge_tree>
#</yandex>
#EOT
2 changes: 1 addition & 1 deletion test/integration/integration_test.go
@@ -468,7 +468,7 @@ func TestRBAC(t *testing.T) {

creatAllRBAC := func(drop bool) {
if drop {
log.Info("drop all RBAC related objects after backup")
log.Info("drop all RBAC related objects")
ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac")
ch.queryWithNoError(r, "DROP QUOTA test_rbac")
ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac")
