Skip to content

Commit

Permalink
- add tests for azure_blob_storage backup disk for `use_embedded_ba…
Browse files Browse the repository at this point in the history
…ckup_restore: true`; it works, but is slow — see ClickHouse/ClickHouse#52088 and Azure/Azurite#2053 for details

- clean Dockerfile for build-race-docker and build-race-fips-docker parameters
  • Loading branch information
Slach committed Jul 13, 2023
1 parent 719e252 commit 45665d9
Show file tree
Hide file tree
Showing 11 changed files with 205 additions and 85 deletions.
8 changes: 8 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
# v2.3.1
IMPROVEMENTS
- add but skip tests for `azure_blob_storage` backup disk for `use_embedded_backup_restore: true`; it works, but is slow — see https://github.com/ClickHouse/ClickHouse/issues/52088 for details

BUG FIXES
- complete success/failure server callback notification even when the main context is canceled, fix [680](https://github.com/Altinity/clickhouse-backup/pull/680)
- `clean` command will not return an error when the shadow directory does not exist, fix [686](https://github.com/Altinity/clickhouse-backup/issues/686)

# v2.3.0
IMPROVEMENTS
- add FIPS compatible builds and examples, fix [656](https://github.com/Altinity/clickhouse-backup/issues/656), fix [674](https://github.com/Altinity/clickhouse-backup/issues/674)
Expand Down
4 changes: 2 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@ FROM builder-base AS builder-race
ARG TARGETPLATFORM
COPY ./ /src/
RUN mkdir -p ./clickhouse-backup/
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -a -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
RUN cp -l ./clickhouse-backup/clickhouse-backup-race /bin/clickhouse-backup && ldd ./clickhouse-backup/clickhouse-backup-race
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
RUN cp -l ./clickhouse-backup/clickhouse-backup-race-fips /bin/clickhouse-backup-fips && ldd ./clickhouse-backup/clickhouse-backup-race-fips
COPY entrypoint.sh /entrypoint.sh

Expand Down
3 changes: 3 additions & 0 deletions pkg/backup/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,9 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu
}
apexLog.Debugf("object_disk.ReadMetadataFromReader(%s)", f.Name())
meta, err := object_disk.ReadMetadataFromReader(r, f.Name())
if err != nil {
return err
}
for _, o := range meta.StorageObjects {
if err = object_disk.DeleteFile(ctx, b.cfg.ClickHouse.EmbeddedBackupDisk, o.ObjectRelativePath); err != nil {
return err
Expand Down
22 changes: 22 additions & 0 deletions pkg/backup/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,28 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string,
if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore {
log.Warnf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`")
}

if b.cfg.ClickHouse.UseEmbeddedBackupRestore {
fatalMsg := fmt.Sprintf("`general->remote_storage: %s` `clickhouse->use_embedded_backup_restore: %v` require %s->compression_format: none, actual %%s", b.cfg.General.RemoteStorage, b.cfg.ClickHouse.UseEmbeddedBackupRestore, b.cfg.General.RemoteStorage)
if b.cfg.General.RemoteStorage == "s3" && b.cfg.S3.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.S3.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "gcs" && b.cfg.GCS.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.GCS.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "azblob" && b.cfg.AzureBlob.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.AzureBlob.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "sftp" && b.cfg.SFTP.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.SFTP.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "ftp" && b.cfg.FTP.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.FTP.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "cos" && b.cfg.COS.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.COS.CompressionFormat)
}
}
if b.cfg.General.RemoteStorage == "custom" && b.resume {
return fmt.Errorf("can't resume for `remote_storage: custom`")
}
Expand Down
8 changes: 4 additions & 4 deletions pkg/storage/azblob.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,16 +36,16 @@ func (s *AzureBlob) Kind() string {
// Connect - connect to Azure
func (s *AzureBlob) Connect(ctx context.Context) error {
if s.Config.EndpointSuffix == "" {
return fmt.Errorf("endpoint suffix not set")
return fmt.Errorf("azblob endpoint suffix not set")
}
if s.Config.Container == "" {
return fmt.Errorf("container name not set")
return fmt.Errorf("azblob container name not set")
}
if s.Config.AccountName == "" {
return fmt.Errorf("account name not set")
return fmt.Errorf("azblob account name not set")
}
if s.Config.AccountKey == "" && s.Config.SharedAccessSignature == "" && !s.Config.UseManagedIdentity {
return fmt.Errorf("account key or SAS or use_managed_identity must be set")
return fmt.Errorf("azblob account key or SAS or use_managed_identity must be set")
}
var (
err error
Expand Down
18 changes: 13 additions & 5 deletions pkg/storage/object_disk/object_disk.go
Original file line number Diff line number Diff line change
Expand Up @@ -346,7 +346,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
if containerNameNode == nil {
return nil, fmt.Errorf("%s -> /%s/storage_configuration/disks/%s doesn't contains <account_key>", configFile, root.Data, diskName)
}
creds.AzureAccountName = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
creds.AzureAccountKey = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
credentials[diskName] = creds
break
}
Expand Down Expand Up @@ -425,7 +425,12 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
break
case "azblob":
connection.Type = "azure_blob_storage"
azureCfg := config.AzureBlobConfig{}
azureCfg := config.AzureBlobConfig{
Timeout: "15m",
BufferSize: 2 * 1024 * 1024,
MaxBuffers: 3,
MaxPartsCount: 5000,
}
azureURL, err := url.Parse(creds.EndPoint)
if err != nil {
return nil, err
Expand All @@ -435,11 +440,14 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
azureCfg.EndpointSchema = azureURL.Scheme
}
azureCfg.EndpointSuffix = azureURL.Host
if creds.AzureAccountName != "" {
azureCfg.AccountName = creds.AzureAccountName
}
if azureURL.Path != "" {
azureCfg.Path = azureURL.Path
}
if creds.AzureAccountKey != "" {
azureCfg.AccountName = creds.AzureAccountName
if azureCfg.AccountName != "" && strings.HasPrefix(azureCfg.Path, "/"+creds.AzureAccountName) {
azureCfg.Path = strings.TrimPrefix(azureURL.Path, "/"+creds.AzureAccountName)
}
}
if creds.AzureAccountKey != "" {
azureCfg.AccountKey = creds.AzureAccountKey
Expand Down
2 changes: 1 addition & 1 deletion test/integration/config-azblob-embedded.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ azblob:
endpoint_schema: http
container: container1
path: backup
compression_format: tar
compression_format: none
api:
listen: :7171
create_integration_tables: true
Expand Down
34 changes: 33 additions & 1 deletion test/integration/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
services:
services:
sshd:
image: docker.io/panubo/sshd:latest
container_name: sshd
Expand Down Expand Up @@ -32,6 +32,10 @@ services:
MINIO_DEFAULT_BUCKETS: 'clickhouse'
MINIO_ROOT_USER: access-key
MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
healthcheck:
test: curl -sL http://localhost:9000/
interval: 10s
retries: 30
volumes:
- ./minio_nodelete.sh:/bin/minio_nodelete.sh
networks:
Expand All @@ -54,9 +58,31 @@ services:
image: mcr.microsoft.com/azure-storage/azurite:latest
container_name: azure
hostname: devstoreaccount1.blob.azure
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
networks:
- clickhouse-backup

# azure_init:
# image: mcr.microsoft.com/azure-cli:latest
# command:
# - /bin/sh
# - -xc
# - |
# az storage container create --debug --name azure-backup-disk &&
# az storage container create --debug --name azure-disk
# depends_on:
# azure:
# condition: service_healthy
# environment:
# # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
# AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
# networks:
# - clickhouse-backup

zookeeper:
image: docker.io/zookeeper:${ZOOKEEPER_VERSION:-latest}
container_name: zookeeper
Expand Down Expand Up @@ -134,6 +160,12 @@ services:
depends_on:
zookeeper:
condition: service_healthy
minio:
condition: service_healthy
azure:
condition: service_healthy
# azure_init:
# condition: service_completed_successfully

all_services_ready:
image: hello-world
Expand Down
34 changes: 33 additions & 1 deletion test/integration/docker-compose_advanced.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@ services:
MINIO_DEFAULT_BUCKETS: 'clickhouse'
MINIO_ROOT_USER: access-key
MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
healthcheck:
test: curl -sL http://localhost:9000/
interval: 10s
retries: 30
volumes:
- ./minio_nodelete.sh:/bin/minio_nodelete.sh
ports:
Expand All @@ -58,7 +62,7 @@ services:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /storage/altinity-qa-test && fake-gcs-server -scheme http -port 8080 -public-host gsc:8080"
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

Expand All @@ -67,9 +71,31 @@ services:
image: mcr.microsoft.com/azure-storage/azurite:latest
container_name: azure
hostname: devstoreaccount1.blob.azure
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
networks:
- clickhouse-backup

# azure_init:
# image: mcr.microsoft.com/azure-cli:latest
# command:
# - /bin/sh
# - -xc
# - |
# az storage container create --debug --name azure-backup-disk &&
# az storage container create --debug --name azure-disk
# depends_on:
# azure:
# condition: service_healthy
# environment:
# # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
# AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
# networks:
# - clickhouse-backup

mysql:
image: docker.io/mysql:${MYSQL_VERSION:-latest}
command: --default-authentication-plugin=mysql_native_password --gtid_mode=on --enforce_gtid_consistency=ON
Expand Down Expand Up @@ -189,6 +215,12 @@ services:
condition: service_healthy
zookeeper:
condition: service_healthy
minio:
condition: service_healthy
azure:
condition: service_healthy
# azure_init:
# condition: service_completed_successfully

all_services_ready:
image: hello-world
Expand Down
106 changes: 61 additions & 45 deletions test/integration/dynamic_settings.sh
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ EOT

fi

if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_s3.xml
<yandex>
Expand Down Expand Up @@ -110,7 +110,7 @@ EOT

fi

if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_encrypted_s3.xml
<yandex>
Expand Down Expand Up @@ -182,52 +182,68 @@ EOT

fi

# embedded s3_plain and azure backup configuration
if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then
# s3_plain and azure backup configuration
if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.[4-9] || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then

mkdir -p /var/lib/clickhouse/disks/backups_azure/ /var/lib/clickhouse/disks/backups_s3_plain/
mkdir -p /var/lib/clickhouse/disks/backups_s3_plain/
chown -R clickhouse /var/lib/clickhouse/disks/

#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
#<?xml version="1.0"?>
#<clickhouse>
# <storage_configuration>
# <disks>
# <backups_s3_plain>
# <send_metadata>true</send_metadata>
# <type>s3_plain</type>
# <endpoint>http://minio:9000/clickhouse/backups_plain/</endpoint>
# <access_key_id>access-key</access_key_id>
# <secret_access_key>it-is-my-super-secret-key</secret_access_key>
# <cache_enabled>false</cache_enabled>
# </backups_s3_plain>
# </disks>
# </storage_configuration>
# <backups>
# <allowed_disk>backups_azure</allowed_disk>
# </backups>
#</clickhouse>
#EOT

#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
#<?xml version="1.0"?>
#<clickhouse>
# <storage_configuration>
# <disks>
# <backups_azure>
# <type>azure_blob_storage</type>
# <storage_account_url>http://azure:10000</storage_account_url>
# <container_name>container-embedded</container_name>
# <account_name>devstoreaccount1</account_name>
# <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
# </backups_azure>
# </disks>
# </storage_configuration>
# <backups>
# <allowed_disk>backups_azure</allowed_disk>
# </backups>
#</clickhouse>
#EOT
cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<backups_s3_plain>
<type>s3_plain</type>
<endpoint>http://minio:9000/clickhouse/backups_s3_plain/</endpoint>
<access_key_id>access-key</access_key_id>
<secret_access_key>it-is-my-super-secret-key</secret_access_key>
<cache_enabled>false</cache_enabled>
</backups_s3_plain>
</disks>
</storage_configuration>
<backups>
<allowed_disk>backups_s3</allowed_disk>
<allowed_disk>backups_s3_plain</allowed_disk>
</backups>
</clickhouse>
EOT

mkdir -p /var/lib/clickhouse/disks/backups_azure/
chown -R clickhouse /var/lib/clickhouse/disks/

cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<azure>
<type>azure_blob_storage</type>
<storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
<container_name>azure-disk</container_name>
<!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
<account_name>devstoreaccount1</account_name>
<account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
<cache_enabled>false</cache_enabled>
</azure>
<backups_azure>
<type>azure_blob_storage</type>
<storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
<container_name>azure-backup-disk</container_name>
<!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
<account_name>devstoreaccount1</account_name>
<account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
<cache_enabled>false</cache_enabled>
</backups_azure>
</disks>
</storage_configuration>
<backups>
<allowed_disk>backups_s3</allowed_disk>
<allowed_disk>backups_s3_plain</allowed_disk>
<allowed_disk>backups_azure</allowed_disk>
</backups>
</clickhouse>
EOT

fi

Expand Down
Loading

0 comments on commit 45665d9

Please sign in to comment.