Commit

Merge branch 'master' into Louis/advertise-address

LinuxGit authored Mar 4, 2020
2 parents a662543 + 27c061f commit a4798ce
Showing 11 changed files with 349 additions and 23 deletions.

.github/workflows/ci.yaml (1 addition, 0 deletions)

@@ -17,6 +17,7 @@ jobs:
- "check-setup check"
- "docker e2e-docker cli"
- "test GOFLAGS=-race"
- "e2e-examples"
steps:
- name: Set up Go 1.13
uses: actions/setup-go@v1

Makefile (3 additions, 0 deletions)

@@ -97,6 +97,9 @@ e2e-build:
  e2e:
  	./hack/e2e.sh

+ e2e-examples:
+ 	./hack/e2e-examples.sh
+
  stability-test-build:
  	$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/stability-test/bin/blockwriter ./tests/cmd/blockwriter
  	$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/stability-test/bin/stability-test ./tests/cmd/stability
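
The new target gives a local entry point for the same suite CI runs. A quick sketch of invoking it (assumes Docker is available, since the script it calls provisions its own kind cluster):

    # Runs hack/e2e-examples.sh (added in this commit): it creates a kind
    # cluster, starts tidb-operator, then executes each script found under
    # tests/examples/.
    make e2e-examples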

charts/tidb-backup/templates/scripts/_start_backup.sh.tpl (20 additions, 9 deletions)

@@ -89,13 +89,24 @@ uploader \
  {{- end }}

  {{- if .Values.s3 }}
- uploader \
-   --cloud=aws \
-   --region={{ .Values.s3.region }} \
-   {{- if .Values.s3.prefix }}
-   --bucket={{ .Values.s3.bucket }}/{{ .Values.s3.prefix }} \
-   {{- else }}
-   --bucket={{ .Values.s3.bucket }} \
-   {{- end }}
-   --backup-dir=${dirname}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.s3.bucket }}
+
+ cat <<EOF > /tmp/rclone.conf
+ [s3]
+ type = s3
+ provider = AWS
+ env_auth = true
+ region = {{ .Values.s3.region }}
+ EOF
+
+ cd "${backup_base_dir}"
+ {{- if .Values.s3.prefix }}
+ tar -cf - "${backup_name}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.s3.prefix }}/${backup_name}/${backup_name}.tgz
+ {{- else }}
+ tar -cf - "${backup_name}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backup_name}/${backup_name}.tgz
+ {{- end }}
  {{- end }}
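
The rewritten upload streams the dump through tar and pigz straight into rclone rcat, so no compressed archive ever lands on local disk. A hedged sketch of verifying or restoring such a backup with the same config file (bucket and backup names below are placeholders, not values from this commit):

    # Assumes /tmp/rclone.conf as generated above and AWS credentials in the
    # environment (env_auth = true). Names are illustrative placeholders.
    bucket=my-backup-bucket
    backup_name=fullbackup-202003040000

    # List what the job uploaded.
    rclone --config /tmp/rclone.conf ls "s3:${bucket}/${backup_name}/"

    # Stream the archive back down and unpack it, mirroring the upload pipeline.
    rclone --config /tmp/rclone.conf cat "s3:${bucket}/${backup_name}/${backup_name}.tgz" \
      | pigz -d | tar -xf -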

charts/tidb-backup/values.yaml (1 addition, 1 deletion)

@@ -27,7 +27,7 @@ name: fullbackup-{{ date "200601021504" .Release.Time }}
  image:
    pullPolicy: IfNotPresent
    # https://github.com/pingcap/tidb-cloud-backup
-   backup: pingcap/tidb-cloud-backup:20191217
+   backup: pingcap/tidb-cloud-backup:20200229

  ## nodeSelector ensure pods only assigning to nodes which have each of the indicated key-value pairs as labels
  ## ref:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector

charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml

@@ -74,7 +74,7 @@ spec:
    - name: GOOGLE_APPLICATION_CREDENTIALS
      value: /gcp/credentials.json
  {{- end }}
- {{- if or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3 }}
+ {{- if or .Values.scheduledBackup.ceph.secretName .Values.scheduledBackup.s3.secretName }}
    - name: AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
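
With this change the AWS credential variables are injected only when a secret name is actually configured, rather than whenever the ceph or s3 block exists at all. The secret itself is created as documented in this commit's values.yaml (keys below are placeholders):

    # Create the secret referenced by scheduledBackup.s3.secretName.
    kubectl create secret generic s3-backup-secret \
      --from-literal=access_key=<access-key> \
      --from-literal=secret_key=<secret-key>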

charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl

@@ -3,7 +3,8 @@ set -euo pipefail
  host=$(getent hosts {{ template "cluster.name" . }}-tidb | head | awk '{print $1}')

  backupName=scheduled-backup-`date "+%Y%m%d-%H%M%S"`
- backupPath=/data/${backupName}
+ backupBase=/data
+ backupPath=${backupBase}/${backupName}

  echo "making dir ${backupPath}"
  mkdir -p ${backupPath}
@@ -37,10 +38,29 @@ echo "Reset TiKV GC life time to ${gc_life_time}"
  /usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"

  {{- if .Values.scheduledBackup.gcp }}
- uploader \
-   --cloud=gcp \
-   --bucket={{ .Values.scheduledBackup.gcp.bucket }} \
-   --backup-dir=${backupPath}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.scheduledBackup.gcp.bucket }}
+ creds=${GOOGLE_APPLICATION_CREDENTIALS:-""}
+ if ! [[ -z $creds ]] ; then
+   creds="service_account_file = ${creds}"
+ fi
+
+ cat <<EOF > /tmp/rclone.conf
+ [gcp]
+ type = google cloud storage
+ bucket_policy_only = true
+ $creds
+ EOF
+
+ cd "${backupBase}"
+ {{- if .Values.scheduledBackup.gcp.prefix }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/{{ .Values.scheduledBackup.gcp.prefix }}/${backupName}/${backupName}.tgz
+ {{- else }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/${backupName}/${backupName}.tgz
+ {{- end }}
  {{- end }}

  {{- if .Values.scheduledBackup.ceph }}
@@ -52,11 +72,26 @@ uploader \
  {{- end }}

  {{- if .Values.scheduledBackup.s3 }}
- uploader \
-   --cloud=aws \
-   --region={{ .Values.scheduledBackup.s3.region }} \
-   --bucket={{ .Values.scheduledBackup.s3.bucket }} \
-   --backup-dir=${backupPath}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.scheduledBackup.s3.bucket }}
+
+ cat <<EOF > /tmp/rclone.conf
+ [s3]
+ type = s3
+ provider = AWS
+ env_auth = true
+ region = {{ .Values.scheduledBackup.s3.region }}
+ EOF
+
+ cd "${backupBase}"
+ {{- if .Values.scheduledBackup.s3.prefix }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.scheduledBackup.s3.prefix }}/${backupName}/${backupName}.tgz
+ {{- else }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+   | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backupName}/${backupName}.tgz
+ {{- end }}
  {{- end }}

  {{- if and (.Values.scheduledBackup.cleanupAfterUpload) (or (.Values.scheduledBackup.gcp) (or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3)) }}
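
Both the GCP and S3 branches share the same shape: tar streams the backup directory, pigz compresses it on 16 threads, and rclone rcat uploads from stdin. A minimal sketch of exercising that pipeline against rclone's local backend instead of a real bucket (all paths are placeholders):

    # Stand-in data and a local "bucket".
    mkdir -p /tmp/backup-base/scheduled-backup-demo /tmp/fake-bucket
    echo "demo" > /tmp/backup-base/scheduled-backup-demo/data.sql

    printf '[local]\ntype = local\n' > /tmp/rclone.conf

    cd /tmp/backup-base
    tar -cf - scheduled-backup-demo | pigz -p 4 \
      | rclone --config /tmp/rclone.conf rcat local:/tmp/fake-bucket/scheduled-backup-demo.tgz

    # Round-trip check: list the archive contents.
    rclone --config /tmp/rclone.conf cat local:/tmp/fake-bucket/scheduled-backup-demo.tgz \
      | pigz -d | tar -tf -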

charts/tidb-cluster/values.yaml (3 additions, 1 deletion)

@@ -700,7 +700,7 @@ binlog:
  scheduledBackup:
    create: false
    # https://github.com/pingcap/tidb-cloud-backup
-   mydumperImage: pingcap/tidb-cloud-backup:20191217
+   mydumperImage: pingcap/tidb-cloud-backup:20200229
    mydumperImagePullPolicy: IfNotPresent
    # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
    # different classes might map to quality-of-service levels, or to backup policies,
@@ -741,6 +741,7 @@ scheduledBackup:
  # backup to gcp
  gcp: {}
  # bucket: ""
+ # prefix: ""
  # secretName is the name of the secret which stores the gcp service account credentials json file
  # The service account must have read/write permission to the above bucket.
  # Read the following document to create the service account and download the credentials file as credentials.json:
@@ -761,6 +762,7 @@ scheduledBackup:
  s3: {}
  # region: ""
  # bucket: ""
+ # prefix: ""
  # secretName is the name of the secret which stores s3 object store access key and secret key
  # You can create the secret by:
  # kubectl create secret generic s3-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
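
The new prefix keys let scheduled backups land under a sub-path of the bucket. A hedged example of wiring them up at upgrade time (release name, bucket, and prefix are placeholders, and other required backup settings are omitted):

    # Placeholders throughout; adjust release, namespace, and values to taste.
    helm upgrade tidb-cluster ./charts/tidb-cluster \
      --set scheduledBackup.create=true \
      --set scheduledBackup.s3.region=us-west-2 \
      --set scheduledBackup.s3.bucket=my-backup-bucket \
      --set scheduledBackup.s3.prefix=prod/tidb \
      --set scheduledBackup.s3.secretName=s3-backup-secret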

examples/basic/tidb-cluster.yaml (1 addition, 1 deletion)

@@ -16,7 +16,7 @@ spec:
    baseImage: pingcap/tikv
    replicas: 3
    requests:
-     storage: "50Gi"
+     storage: "1Gi"
    config: {}
  tidb:
    baseImage: pingcap/tidb

hack/e2e-examples.sh (44 additions, 0 deletions, new file)

@@ -0,0 +1,44 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2020 PingCAP, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ #
+ # E2E entrypoint script for examples.
+ #
+
+ ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+ cd $ROOT
+
+ source "${ROOT}/hack/lib.sh"
+
+ hack::ensure_kind
+
+ echo "info: create a Kubernetes cluster"
+ $KIND_BIN create cluster
+
+ echo "info: start tidb-operator"
+ hack/local-up-operator.sh
+
+ echo "info: testing examples"
+ export PATH=$PATH:$OUTPUT_BIN
+ hack::ensure_kubectl
+ for t in $(find tests/examples/ -name '*.sh'); do
+   echo "info: testing $t"
+   $t
+   if [ $? -eq 0 ]; then
+     echo "info: test $t passed"
+   else
+     echo "error: test $t failed"
+   fi
+ done
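
The runner picks up any *.sh under tests/examples/, so adding coverage is a matter of dropping in an executable script. A hypothetical sketch of what one might look like (file name, namespace, and wait logic are illustrative, not part of this commit):

    #!/usr/bin/env bash
    # Hypothetical tests/examples/basic.sh: deploy the basic example and wait
    # for PD pods to become ready. Illustrative only.
    set -euo pipefail

    kubectl create namespace basic-example || true
    kubectl -n basic-example apply -f examples/basic/tidb-cluster.yaml

    # Poll until the operator has created PD pods, then wait for readiness.
    for _ in $(seq 1 60); do
      if kubectl -n basic-example get pod -l app.kubernetes.io/component=pd \
          --no-headers 2>/dev/null | grep -q .; then
        break
      fi
      sleep 10
    done
    kubectl -n basic-example wait --for=condition=Ready pod \
      -l app.kubernetes.io/component=pd --timeout=10m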