-stack_size
+stack-size
string
@@ -9075,7 +9163,7 @@ TiKVGCConfig
- batch_keys
+ batch-keys
int64
@@ -9087,7 +9175,7 @@ TiKVGCConfig
- max_write_bytes_per_sec
+ max-write-bytes-per-sec
string
@@ -9116,7 +9204,7 @@ TiKVImportConfig
-import_dir
+import-dir
string
@@ -9127,7 +9215,7 @@ TiKVImportConfig
-num_threads
+num-threads
int64
@@ -9138,7 +9226,7 @@ TiKVImportConfig
-num_import_jobs
+num-import-jobs
int64
@@ -9149,7 +9237,7 @@ TiKVImportConfig
-num_import_sst_jobs
+num-import-sst-jobs
int64
@@ -9160,7 +9248,7 @@ TiKVImportConfig
-max_prepare_duration
+max-prepare-duration
string
@@ -9171,7 +9259,7 @@ TiKVImportConfig
-region_split_size
+region-split-size
string
@@ -9182,7 +9270,7 @@ TiKVImportConfig
-stream_channel_window
+stream-channel-window
int64
@@ -9193,7 +9281,7 @@ TiKVImportConfig
-max_open_engines
+max-open-engines
int64
@@ -9204,7 +9292,7 @@ TiKVImportConfig
-upload_speed_limit
+upload-speed-limit
string
@@ -9246,7 +9334,7 @@ TiKVPDConfig
-retry_interval
+retry-interval
string
@@ -9260,7 +9348,7 @@ TiKVPDConfig
-retry_max_count
+retry-max-count
int64
@@ -9274,7 +9362,7 @@ TiKVPDConfig
-retry_log_every
+retry-log-every
int64
@@ -9307,7 +9395,7 @@ TiKVRaftDBConfig
-wal_recovery_mode
+wal-recovery-mode
string
@@ -9318,7 +9406,7 @@ TiKVRaftDBConfig
-wal_dir
+wal-dir
string
@@ -9329,7 +9417,7 @@ TiKVRaftDBConfig
-wal_ttl_seconds
+wal-ttl-seconds
int64
@@ -9340,7 +9428,7 @@ TiKVRaftDBConfig
-wal_size_limit
+wal-size-limit
string
@@ -9351,7 +9439,7 @@ TiKVRaftDBConfig
-max_total_wal_size
+max-total-wal-size
string
@@ -9362,7 +9450,7 @@ TiKVRaftDBConfig
-max_background_jobs
+max-background-jobs
int64
@@ -9373,7 +9461,7 @@ TiKVRaftDBConfig
-max_manifest_file_size
+max-manifest-file-size
string
@@ -9384,7 +9472,7 @@ TiKVRaftDBConfig
-create_if_missing
+create-if-missing
bool
@@ -9395,7 +9483,7 @@ TiKVRaftDBConfig
-max_open_files
+max-open-files
int64
@@ -9406,7 +9494,7 @@ TiKVRaftDBConfig
-enable_statistics
+enable-statistics
bool
@@ -9417,7 +9505,7 @@ TiKVRaftDBConfig
-stats_dump_period
+stats-dump-period
string
@@ -9428,7 +9516,7 @@ TiKVRaftDBConfig
-compaction_readahead_size
+compaction-readahead-size
string
@@ -9439,7 +9527,7 @@ TiKVRaftDBConfig
-info_log_max_size
+info-log-max-size
string
@@ -9450,7 +9538,7 @@ TiKVRaftDBConfig
-info_log_roll_time
+info-log-roll-time
string
@@ -9461,7 +9549,7 @@ TiKVRaftDBConfig
-info_log_keep_log_file_num
+info-log-keep-log-file-num
int64
@@ -9472,7 +9560,7 @@ TiKVRaftDBConfig
-info_log_dir
+info-log-dir
string
@@ -9483,7 +9571,7 @@ TiKVRaftDBConfig
-max_sub_compactions
+max-sub-compactions
int64
@@ -9494,7 +9582,7 @@ TiKVRaftDBConfig
-writable_file_max_buffer_size
+writable-file-max-buffer-size
string
@@ -9505,7 +9593,7 @@ TiKVRaftDBConfig
-use_direct_io_for_flush_and_compaction
+use-direct-io-for-flush-and-compaction
bool
@@ -9516,7 +9604,7 @@ TiKVRaftDBConfig
-enable_pipelined_write
+enable-pipelined-write
bool
@@ -9527,7 +9615,7 @@ TiKVRaftDBConfig
-allow_concurrent_memtable_write
+allow-concurrent-memtable-write
bool
@@ -9538,7 +9626,7 @@ TiKVRaftDBConfig
-bytes_per_sync
+bytes-per-sync
string
@@ -9549,7 +9637,7 @@ TiKVRaftDBConfig
-wal_bytes_per_sync
+wal-bytes-per-sync
string
@@ -10258,7 +10346,7 @@ TiKVSecurityConfig
-override_ssl_target
+override-ssl-target
string
@@ -10269,7 +10357,7 @@ TiKVSecurityConfig
-cipher_file
+cipher-file
string
@@ -10347,7 +10435,7 @@ TiKVServerConfig
-grpc_memory_pool_quota
+grpc-memory-pool-quota
string
@@ -10615,6 +10703,17 @@ TiKVSpec
+serviceAccount
+
+string
+
+
+
+Specify a Service Account for tikv
+
+
+
+
replicas
int32
@@ -10890,7 +10989,7 @@ TiKVStorageReadPoolConfi
-high_concurrency
+high-concurrency
int64
@@ -10902,7 +11001,7 @@ TiKVStorageReadPoolConfi
-normal_concurrency
+normal-concurrency
int64
@@ -10914,7 +11013,7 @@ TiKVStorageReadPoolConfi
-low_concurrency
+low-concurrency
int64
@@ -10926,7 +11025,7 @@ TiKVStorageReadPoolConfi
-max_tasks_per_worker_high
+max-tasks-per-worker-high
int64
@@ -10938,7 +11037,7 @@ TiKVStorageReadPoolConfi
-max_tasks_per_worker_normal
+max-tasks-per-worker-normal
int64
@@ -10950,7 +11049,7 @@ TiKVStorageReadPoolConfi
-max_tasks_per_worker_low
+max-tasks-per-worker-low
int64
@@ -10962,7 +11061,7 @@ TiKVStorageReadPoolConfi
-stack_size
+stack-size
string
diff --git a/hack/e2e.sh b/hack/e2e.sh
index 881102dac8..478974c09f 100755
--- a/hack/e2e.sh
+++ b/hack/e2e.sh
@@ -53,6 +53,7 @@ Environments:
KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config
SKIP_BUILD skip building binaries
SKIP_IMAGE_BUILD skip build and push images
+ SKIP_IMAGE_LOAD skip load images
SKIP_UP skip starting the cluster
SKIP_DOWN skip shutting down the cluster
SKIP_TEST skip running the test
@@ -76,6 +77,7 @@ Environments:
GINKGO_PARALLEL if set to `y`, will run specs in parallel, the number of nodes will be the number of cpus
GINKGO_NO_COLOR if set to `y`, suppress color output in default reporter
RUNNER_SUITE_NAME the suite name of runner
+ SKIP_GINKGO if set to `y`, skip ginkgo
Examples:
@@ -179,6 +181,7 @@ CLUSTER=${CLUSTER:-tidb-operator}
KUBECONFIG=${KUBECONFIG:-~/.kube/config}
SKIP_BUILD=${SKIP_BUILD:-}
SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-}
+SKIP_IMAGE_LOAD=${SKIP_IMAGE_LOAD:-}
SKIP_UP=${SKIP_UP:-}
SKIP_DOWN=${SKIP_DOWN:-}
SKIP_TEST=${SKIP_TEST:-}
@@ -199,6 +202,7 @@ KUBE_WORKERS=${KUBE_WORKERS:-3}
DOCKER_IO_MIRROR=${DOCKER_IO_MIRROR:-}
GCR_IO_MIRROR=${GCR_IO_MIRROR:-}
QUAY_IO_MIRROR=${QUAY_IO_MIRROR:-}
+SKIP_GINKGO=${SKIP_GINKGO:-}
RUNNER_SUITE_NAME=${RUNNER_SUITE_NAME:-}
echo "PROVIDER: $PROVIDER"
@@ -485,6 +489,14 @@ else
exit 1
fi
+if [ "${HOSTNAME:-}" == "tidb-operator-dev" -a ! -f /usr/local/bin/helm ]; then
+ ln -s $OUTPUT_BIN/helm /usr/local/bin/helm
+fi
+
+if [ "${HOSTNAME:-}" == "tidb-operator-dev" -a ! -f /usr/local/bin/kind ]; then
+ ln -s $KIND_BIN /usr/local/bin/kind
+fi
+
# Environments for hack/run-e2e.sh
export PROVIDER
export CLUSTER
@@ -494,6 +506,8 @@ export GCP_REGION
export GCP_ZONE
export GCP_CREDENTIALS
export IMAGE_TAG
+export SKIP_GINKGO
+export SKIP_IMAGE_LOAD
export TIDB_OPERATOR_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
export E2E_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator-e2e:${IMAGE_TAG}
export PATH=$OUTPUT_BIN:$PATH
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index bc78ccf3ad..3f207093e0 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -29,6 +29,7 @@ GCP_REGION=${GCP_REGION:-}
GCP_ZONE=${GCP_ZONE:-}
GCP_CREDENTIALS=${GCP_CREDENTIALS:-}
IMAGE_TAG=${IMAGE_TAG:-}
+SKIP_IMAGE_LOAD=${SKIP_IMAGE_LOAD:-}
TIDB_OPERATOR_IMAGE=${TIDB_OPERATOR_IMAGE:-localhost:5000/pingcap/tidb-operator:latest}
E2E_IMAGE=${E2E_IMAGE:-localhost:5000/pingcap/tidb-operator-e2e:latest}
KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}
@@ -41,6 +42,7 @@ GINKGO_PARALLEL=${GINKGO_PARALLEL:-n} # set to 'y' to run tests in parallel
# in parallel
GINKGO_NO_COLOR=${GINKGO_NO_COLOR:-n}
GINKGO_STREAM=${GINKGO_STREAM:-y}
+SKIP_GINKGO=${SKIP_GINKGO:-}
if [ -z "$KUBECONFIG" ]; then
echo "error: KUBECONFIG is required"
@@ -284,10 +286,18 @@ if [ -z "$KUBECONTEXT" ]; then
echo "info: current kubeconfig context is '$KUBECONTEXT'"
fi
-e2e::image_load
+if [ -z "$SKIP_IMAGE_LOAD" ]; then
+ e2e::image_load
+fi
+
e2e::setup_local_pvs
e2e::setup_helm_server
+if [ -n "$SKIP_GINKGO" ]; then
+ echo "info: skipping ginkgo"
+ exit 0
+fi
+
echo "info: start to run e2e process"
ginkgo_args=()
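Taken together, the two new switches make the e2e harness reusable across iterations: SKIP_GINKGO stops hack/run-e2e.sh right after the cluster, local PVs, and Helm server are prepared, while SKIP_IMAGE_LOAD avoids re-loading images into the kind nodes on later runs. A minimal usage sketch (the arguments passed after `--` are illustrative, not taken from this patch):

```bash
# First run: build binaries and images, bring up kind, prepare PVs and helm,
# but stop before the ginkgo suite starts.
SKIP_GINKGO=y ./hack/e2e.sh

# Later runs against the same cluster: skip the expensive setup and only run tests.
SKIP_UP=y SKIP_IMAGE_BUILD=y SKIP_IMAGE_LOAD=y ./hack/e2e.sh -- --ginkgo.focus='Backup'
```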
diff --git a/images/tidb-backup-manager/Dockerfile b/images/tidb-backup-manager/Dockerfile
index 861490bb88..17fdc3e216 100644
--- a/images/tidb-backup-manager/Dockerfile
+++ b/images/tidb-backup-manager/Dockerfile
@@ -1,6 +1,6 @@
FROM pingcap/tidb-enterprise-tools:latest
-
-ARG VERSION=v1.48.0
+ARG VERSION=v1.51.0
+ARG SHUSH_VERSION=v1.4.0
RUN apk update && apk add ca-certificates
RUN wget -nv https://github.com/ncw/rclone/releases/download/${VERSION}/rclone-${VERSION}-linux-amd64.zip \
@@ -15,6 +15,10 @@ RUN wget -nv http://download.pingcap.org/br-latest-linux-amd64.tar.gz \
&& chmod 755 /usr/local/bin/br \
&& rm -rf br-latest-linux-amd64.tar.gz
+RUN wget -nv https://github.com/realestate-com-au/shush/releases/download/${SHUSH_VERSION}/shush_linux_amd64 \
+ && mv shush_linux_amd64 /usr/local/bin/shush \
+ && chmod 755 /usr/local/bin/shush
+
COPY bin/tidb-backup-manager /tidb-backup-manager
COPY entrypoint.sh /entrypoint.sh
diff --git a/images/tidb-backup-manager/entrypoint.sh b/images/tidb-backup-manager/entrypoint.sh
index 85c889147d..fc11dc02f2 100755
--- a/images/tidb-backup-manager/entrypoint.sh
+++ b/images/tidb-backup-manager/entrypoint.sh
@@ -19,7 +19,7 @@ echo "Create rclone.conf file."
cat <<EOF > /tmp/rclone.conf
[s3]
type = s3
-env_auth = false
+env_auth = true
provider = ${S3_PROVIDER}
access_key_id = ${AWS_ACCESS_KEY_ID}
secret_access_key = ${AWS_SECRET_ACCESS_KEY:-$AWS_SECRET_KEY}
@@ -51,33 +51,40 @@ else
fi
BACKUP_BIN=/tidb-backup-manager
+if [[ -n "${AWS_DEFAULT_REGION}" ]]; then
+ EXEC_COMMAND="exec"
+else
+ EXEC_COMMAND="/usr/local/bin/shush exec --"
+fi
+
+cat /tmp/rclone.conf
# exec command
case "$1" in
backup)
shift 1
echo "$BACKUP_BIN backup $@"
- exec $BACKUP_BIN backup "$@"
+ $EXEC_COMMAND $BACKUP_BIN backup "$@"
;;
export)
shift 1
echo "$BACKUP_BIN export $@"
- exec $BACKUP_BIN export "$@"
+ $EXEC_COMMAND $BACKUP_BIN export "$@"
;;
restore)
shift 1
echo "$BACKUP_BIN restore $@"
- exec $BACKUP_BIN restore "$@"
+ $EXEC_COMMAND $BACKUP_BIN restore "$@"
;;
import)
shift 1
echo "$BACKUP_BIN import $@"
- exec $BACKUP_BIN import "$@"
+ $EXEC_COMMAND $BACKUP_BIN import "$@"
;;
clean)
shift 1
echo "$BACKUP_BIN clean $@"
- exec $BACKUP_BIN clean "$@"
+ $EXEC_COMMAND $BACKUP_BIN clean "$@"
;;
*)
echo "Usage: $0 {backup|restore|clean}"
diff --git a/manifests/backup/backup-aws-s3-br.yaml b/manifests/backup/backup-aws-s3-br.yaml
new file mode 100644
index 0000000000..1efec57a76
--- /dev/null
+++ b/manifests/backup/backup-aws-s3-br.yaml
@@ -0,0 +1,34 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: Backup
+metadata:
+ name: demo1-backup-s3
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
+ # clusterNamespace:
+ # enableTLSClient: true
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/backup-s3-br.yaml b/manifests/backup/backup-s3-br.yaml
index d6a7bbbf60..d57c23beb7 100644
--- a/manifests/backup/backup-s3-br.yaml
+++ b/manifests/backup/backup-s3-br.yaml
@@ -4,13 +4,16 @@ kind: Backup
metadata:
name: demo1-backup-s3
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
- #backupType: full
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
- # ca:
- # cert:
- # key:
+ cluster: myCluster
+ # clusterNamespace:
+ # enableTLSClient: true
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -18,6 +21,11 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/backup/backup-schedule-aws-s3-br.yaml b/manifests/backup/backup-schedule-aws-s3-br.yaml
new file mode 100644
index 0000000000..bf9501f784
--- /dev/null
+++ b/manifests/backup/backup-schedule-aws-s3-br.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: BackupSchedule
+metadata:
+ name: demo1-backup-schedule-s3
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ #maxBackups: 5
+ #pause: true
+ maxReservedTime: "3h"
+ schedule: "*/2 * * * *"
+ backupTemplate:
+ #backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
+ # clusterNamespace: backupNamespace
+ # enableTLSClient: true
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mysecret
+ # port: 4000
+ # user: root
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/backup-schedule-s3-br.yaml b/manifests/backup/backup-schedule-s3-br.yaml
index 622a7680ae..9cfda351e2 100644
--- a/manifests/backup/backup-schedule-s3-br.yaml
+++ b/manifests/backup/backup-schedule-s3-br.yaml
@@ -4,6 +4,8 @@ kind: BackupSchedule
metadata:
name: demo1-backup-schedule-s3
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
#maxBackups: 5
#pause: true
@@ -11,11 +13,12 @@ spec:
schedule: "*/2 * * * *"
backupTemplate:
#backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
- # ca:
- # cert:
- # key:
+ cluster: myCluster
+ # clusterNamespace: backupNamespace
+ # enableTLSClient: true
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -23,6 +26,11 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mysecret
+ # port: 4000
+ # user: root
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/backup/restore-aws-s3-br.yaml b/manifests/backup/restore-aws-s3-br.yaml
new file mode 100644
index 0000000000..de5edeedb7
--- /dev/null
+++ b/manifests/backup/restore-aws-s3-br.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: Restore
+metadata:
+ name: demo1-restore-s3-br
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
+ # clusterNamespace:
+ # enableTLSClient: true
+ # db:
+ # table:
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ to:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/restore-s3-br.yaml b/manifests/backup/restore-s3-br.yaml
index b0d03f1718..9ce686f63e 100644
--- a/manifests/backup/restore-s3-br.yaml
+++ b/manifests/backup/restore-s3-br.yaml
@@ -4,15 +4,18 @@ kind: Restore
metadata:
name: demo1-restore-s3-br
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
# backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
+ cluster: myCluster
+ # clusterNamespace:
+ # enableTLSClient: true
# db:
# table:
- # ca:
- # cert:
- # key:
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -20,6 +23,11 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ to:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/crd.yaml b/manifests/crd.yaml
index f39fb1d36d..d69a3a1d49 100644
--- a/manifests/crd.yaml
+++ b/manifests/crd.yaml
@@ -5996,6 +5996,9 @@ spec:
description: 'SchedulerName of the component. Override the cluster-level
one if present Optional: Defaults to cluster-level setting'
type: string
+ serviceAccount:
+ description: Specify a Service Account for tikv
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for TiKV
data storage. Defaults to Kubernetes default storage class.
@@ -6896,8 +6899,10 @@ spec:
type: string
required:
- provider
- - secretName
type: object
+ serviceAccount:
+ description: Specify service account of backup
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for Backup
data storage. Defaults to Kubernetes default storage class.
@@ -6952,6 +6957,9 @@ spec:
type: string
type: object
type: array
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
type: object
version: v1alpha1
@@ -7712,8 +7720,10 @@ spec:
type: string
required:
- provider
- - secretName
type: object
+ serviceAccount:
+ description: Specify service account of restore
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for Restore
data storage. Defaults to Kubernetes default storage class.
@@ -7791,6 +7801,9 @@ spec:
type: string
type: object
type: array
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
type: object
version: v1alpha1
@@ -8617,8 +8630,10 @@ spec:
type: string
required:
- provider
- - secretName
type: object
+ serviceAccount:
+ description: Specify service account of backup
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for Backup
data storage. Defaults to Kubernetes default storage class.
@@ -8674,6 +8689,9 @@ spec:
type: string
type: object
type: array
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
maxBackups:
description: MaxBackups is to specify how many backups we want to keep
diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
index 3626ed2f7f..077d8ddad4 100644
--- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go
+++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
@@ -772,6 +772,20 @@ func schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref common.ReferenceCallback) c
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
+ "useKMS": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use KMS to decrypt the secrets",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify service account of backup",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
@@ -2843,6 +2857,20 @@ func schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref common.ReferenceCallback)
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
+ "useKMS": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use KMS to decrypt the secrets",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify service account of restore",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
@@ -2929,7 +2957,7 @@ func schema_pkg_apis_pingcap_v1alpha1_S3StorageProvider(ref common.ReferenceCall
},
},
},
- Required: []string{"provider", "secretName"},
+ Required: []string{"provider"},
},
},
}
@@ -5421,6 +5449,13 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com
},
},
},
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify a Service Account for tikv",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index 25eb8551df..e0de883d86 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -238,6 +238,9 @@ type TiKVSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
+ // Specify a Service Account for tikv
+ ServiceAccount string `json:"serviceAccount,omitempty"`
+
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
@@ -719,7 +722,7 @@ type S3StorageProvider struct {
Acl string `json:"acl,omitempty"`
// SecretName is the name of secret which stores
// S3 compliant storage access key and secret key.
- SecretName string `json:"secretName"`
+ SecretName string `json:"secretName,omitempty"`
// Prefix for the keys.
Prefix string `json:"prefix,omitempty"`
// SSE Server-Side Encryption.
@@ -804,6 +807,10 @@ type BackupSpec struct {
// Affinity of backup Pods
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // Use KMS to decrypt the secrets
+ UseKMS bool `json:"useKMS,omitempty"`
+ // Specify service account of backup
+ ServiceAccount string `json:"serviceAccount,omitempty"`
}
// +k8s:openapi-gen=true
@@ -1022,6 +1029,10 @@ type RestoreSpec struct {
// Affinity of restore Pods
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // Use KMS to decrypt the secrets
+ UseKMS bool `json:"useKMS,omitempty"`
+ // Specify service account of restore
+ ServiceAccount string `json:"serviceAccount,omitempty"`
}
// RestoreStatus represents the current status of a tidb cluster restore.
diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go
index 1c9baca499..dafa6f3e38 100644
--- a/pkg/backup/backup/backup_cleaner.go
+++ b/pkg/backup/backup/backup_cleaner.go
@@ -123,14 +123,18 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
fmt.Sprintf("--backupName=%s", name),
}
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).CleanJob().Backup(name)
-
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go
index 5b9a9b36ed..66f33a73c5 100644
--- a/pkg/backup/backup/backup_manager.go
+++ b/pkg/backup/backup/backup_manager.go
@@ -163,7 +163,7 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
if err != nil {
return nil, reason, err
}
@@ -172,7 +172,6 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
-
envVars = append(envVars, storageEnv...)
// TODO: make pvc request storage size configurable
reason, err = bm.ensureBackupPVCExist(backup)
@@ -193,14 +192,19 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
fmt.Sprintf("--storageType=%s", backuputil.GetStorageType(backup.Spec.StorageProvider)),
}
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).BackupJob().Backup(name)
// TODO: need add ResourceRequirement for backup job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
@@ -251,7 +255,7 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
if err != nil {
return nil, reason, err
}
@@ -285,12 +289,17 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
})
}
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
diff --git a/pkg/backup/backupschedule/backup_schedule_manager.go b/pkg/backup/backupschedule/backup_schedule_manager.go
index b27ae240c6..d4eb31a3b5 100644
--- a/pkg/backup/backupschedule/backup_schedule_manager.go
+++ b/pkg/backup/backupschedule/backup_schedule_manager.go
@@ -233,9 +233,10 @@ func (bm *backupScheduleManager) createBackup(bs *v1alpha1.BackupSchedule, times
backup := &v1alpha1.Backup{
Spec: backupSpec,
ObjectMeta: metav1.ObjectMeta{
- Namespace: ns,
- Name: bs.GetBackupCRDName(timestamp),
- Labels: bsLabel.Labels(),
+ Namespace: ns,
+ Name: bs.GetBackupCRDName(timestamp),
+ Labels: bsLabel.Labels(),
+ Annotations: bs.Annotations,
OwnerReferences: []metav1.OwnerReference{
controller.GetBackupScheduleOwnerRef(bs),
},
diff --git a/pkg/backup/constants/constants.go b/pkg/backup/constants/constants.go
index ba7d8d706d..ce4133c616 100644
--- a/pkg/backup/constants/constants.go
+++ b/pkg/backup/constants/constants.go
@@ -55,4 +55,7 @@ const (
// ServiceAccountCAPath is where the CABundle of the service account is located
ServiceAccountCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+
+ // KMS secret env prefix
+ KMSSecretPrefix = "KMS_ENCRYPTED"
)
diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go
index 03303b16da..e29c938c9a 100644
--- a/pkg/backup/restore/restore_manager.go
+++ b/pkg/backup/restore/restore_manager.go
@@ -154,7 +154,7 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
ns := restore.GetNamespace()
name := restore.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
if err != nil {
return nil, reason, err
}
@@ -178,14 +178,19 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
}
restoreLabel := label.NewBackup().Instance(restore.GetInstanceName()).RestoreJob().Restore(name)
+ serviceAccount := constants.DefaultServiceAccountName
+ if restore.Spec.ServiceAccount != "" {
+ serviceAccount = restore.Spec.ServiceAccount
+ }
// TODO: need add ResourceRequirement for restore job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: restoreLabel.Labels(),
+ Labels: restoreLabel.Labels(),
+ Annotations: restore.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.RestoreJobLabelVal,
@@ -235,7 +240,7 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
ns := restore.GetNamespace()
name := restore.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
if err != nil {
return nil, reason, err
}
@@ -267,13 +272,19 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
},
})
}
+
+ serviceAccount := constants.DefaultServiceAccountName
+ if restore.Spec.ServiceAccount != "" {
+ serviceAccount = restore.Spec.ServiceAccount
+ }
// TODO: need add ResourceRequirement for restore job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: restoreLabel.Labels(),
+ Labels: restoreLabel.Labels(),
+ Annotations: restore.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.RestoreJobLabelVal,
diff --git a/pkg/backup/util/util.go b/pkg/backup/util/util.go
index 86317f53e9..4cf3a8faf3 100644
--- a/pkg/backup/util/util.go
+++ b/pkg/backup/util/util.go
@@ -86,24 +86,28 @@ func GenerateS3CertEnvVar(s3 *v1alpha1.S3StorageProvider) ([]corev1.EnvVar, stri
Name: "AWS_STORAGE_CLASS",
Value: s3.StorageClass,
},
- {
- Name: "AWS_ACCESS_KEY_ID",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
- Key: constants.S3AccessKey,
+ }
+ if s3.SecretName != "" {
+ envVars = append(envVars, []corev1.EnvVar{
+ {
+ Name: "AWS_ACCESS_KEY_ID",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
+ Key: constants.S3AccessKey,
+ },
},
},
- },
- {
- Name: "AWS_SECRET_ACCESS_KEY",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
- Key: constants.S3SecretKey,
+ {
+ Name: "AWS_SECRET_ACCESS_KEY",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
+ Key: constants.S3SecretKey,
+ },
},
},
- },
+ }...)
}
return envVars, "", nil
}
@@ -151,6 +155,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s
func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
var reason string
+ var err error
storageType := GetStorageType(provider)
switch storageType {
@@ -158,17 +163,20 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret
if provider.S3 == nil {
return certEnv, "S3ConfigIsEmpty", errors.New("s3 config is empty")
}
+
s3SecretName := provider.S3.SecretName
- secret, err := secretLister.Secrets(ns).Get(s3SecretName)
- if err != nil {
- err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
- return certEnv, "GetS3SecretFailed", err
- }
+ if s3SecretName != "" {
+ secret, err := secretLister.Secrets(ns).Get(s3SecretName)
+ if err != nil {
+ err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
+ return certEnv, "GetS3SecretFailed", err
+ }
- keyStr, exist := CheckAllKeysExistInSecret(secret, constants.S3AccessKey, constants.S3SecretKey)
- if !exist {
- err := fmt.Errorf("s3 secret %s/%s missing some keys %s", ns, s3SecretName, keyStr)
- return certEnv, "s3KeyNotExist", err
+ keyStr, exist := CheckAllKeysExistInSecret(secret, constants.S3AccessKey, constants.S3SecretKey)
+ if !exist {
+ err := fmt.Errorf("s3 secret %s/%s missing some keys %s", ns, s3SecretName, keyStr)
+ return certEnv, "s3KeyNotExist", err
+ }
}
certEnv, reason, err = GenerateS3CertEnvVar(provider.S3.DeepCopy())
@@ -205,8 +213,9 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret
}
// GenerateTidbPasswordEnv generate the password EnvVar
-func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
+ var passwordKey string
secret, err := secretLister.Secrets(ns).Get(tidbSecretName)
if err != nil {
err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err)
@@ -219,9 +228,15 @@ func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corel
return certEnv, "KeyNotExist", err
}
+ if useKMS {
+ passwordKey = fmt.Sprintf("%s_%s_%s", constants.KMSSecretPrefix, constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey))
+ } else {
+ passwordKey = fmt.Sprintf("%s_%s", constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey))
+ }
+
certEnv = []corev1.EnvVar{
{
- Name: fmt.Sprintf("%s_%s", constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey)),
+ Name: passwordKey,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: tidbSecretName},
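The only observable effect of the useKMS flag in GenerateTidbPasswordEnv is the name of the injected variable: the plain prefixed _PASSWORD form versus the KMS_ENCRYPTED_-prefixed form that shush picks up. One way to verify it on a live cluster (the job name, namespace, and TIDB prefix are assumptions, consistent with the entrypoint sketch above):

```bash
# List the env var names on the generated backup job's pod template.
kubectl -n test1 get job demo1-backup-s3 \
  -o jsonpath='{.spec.template.spec.containers[0].env[*].name}'
# useKMS: false -> ... TIDB_PASSWORD ...
# useKMS: true  -> ... KMS_ENCRYPTED_TIDB_PASSWORD ...
```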
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index 44eea0305a..80953161ba 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -445,6 +445,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
podSpec.SecurityContext = podSecurityContext
podSpec.InitContainers = initContainers
podSpec.Containers = []corev1.Container{tikvContainer}
+ podSpec.ServiceAccountName = tc.Spec.TiKV.ServiceAccount
tikvset := &apps.StatefulSet{
ObjectMeta: metav1.ObjectMeta{