From 368331dc38eff87517f297b27b175de71427c31f Mon Sep 17 00:00:00 2001
From: github-actions
Date: Fri, 25 Aug 2023 09:13:07 +0000
Subject: [PATCH] Preview PR
 https://github.com/pingcap/docs-tidb-operator/pull/2406 and this preview is
 triggered from commit
 https://github.com/pingcap/docs-tidb-operator/pull/2406/commits/7cab2fef071fa66c9b6acc9ca2d722781f1727b0

---
 .../backup-by-ebs-snapshot-across-multiple-kubernetes.md   | 6 +++---
 .../en/tidb-in-kubernetes/master/deploy-br-federation.md   | 1 +
 ...restore-from-ebs-snapshot-across-multiple-kubernetes.md | 7 ++++---
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/markdown-pages/en/tidb-in-kubernetes/master/backup-by-ebs-snapshot-across-multiple-kubernetes.md b/markdown-pages/en/tidb-in-kubernetes/master/backup-by-ebs-snapshot-across-multiple-kubernetes.md
index 5e7547823..7cb6d2435 100644
--- a/markdown-pages/en/tidb-in-kubernetes/master/backup-by-ebs-snapshot-across-multiple-kubernetes.md
+++ b/markdown-pages/en/tidb-in-kubernetes/master/backup-by-ebs-snapshot-across-multiple-kubernetes.md
@@ -20,11 +20,11 @@ If you have any other requirements, refer to [Backup and Restore Overview](backu
 
 ## Prerequisites
 
-> storage blocks on volumes that were created from snapshots must be initialized (pulled down from Amazon S3 and written to the volume) before you can access the block. This preliminary action takes time and can cause a significant increase in the latency of an I/O operation the first time each block is accessed. Volume performance is achieved after all blocks have been downloaded and written to the volume.
+Storage blocks on volumes that were created from snapshots must be initialized (pulled down from Amazon S3 and written to the volume) before you can access the block. This preliminary action takes time and can cause a significant increase in the latency of an I/O operation the first time each block is accessed. Volume performance is achieved after all blocks have been downloaded and written to the volume.
 
-From AWS documentation, the EBS volume restored from snapshot may have high latency before it's initialized, which can result in big performance hit of restored TiDB cluster. See details in [ebs create volume from snapshot](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html#ebs-create-volume-from-snapshot).
+According to AWS documentation, an EBS volume restored from a snapshot might have high latency before it is initialized, which can impact the performance of the restored TiDB cluster. For details, see [Create a volume from a snapshot](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html#ebs-create-volume-from-snapshot).
 
-To initialize the restored volume more efficiently, you should **separate WAL and raft log to a dedicated small volume from TiKV data**. So that we can improve write performance of restored TiDB cluster by full initializing the volume of WAL and raft log.
+To initialize the restored volume more efficiently, it is recommended to **separate the WAL and raft log into a small dedicated volume apart from the TiKV data**. By fully initializing the small volume for the WAL and raft log, you can improve the write performance of the restored TiDB cluster.
 
 ## Limitations
 
diff --git a/markdown-pages/en/tidb-in-kubernetes/master/deploy-br-federation.md b/markdown-pages/en/tidb-in-kubernetes/master/deploy-br-federation.md
index 7404d2485..f83b26224 100644
--- a/markdown-pages/en/tidb-in-kubernetes/master/deploy-br-federation.md
+++ b/markdown-pages/en/tidb-in-kubernetes/master/deploy-br-federation.md
@@ -86,6 +86,7 @@ export CURRENT_CONTEXT=$(kubectl config current-context)
 export CURRENT_CLUSTER=$(kubectl config view --raw -o=go-template='{{range .contexts}}{{if eq .name "'''${CURRENT_CONTEXT}'''"}}{{ index .context "cluster" }}{{end}}{{end}}')
 export CLUSTER_CA=$(kubectl config view --raw -o=go-template='{{range .clusters}}{{if eq .name "'''${CURRENT_CLUSTER}'''"}}"{{with index .cluster "certificate-authority-data" }}{{.}}{{end}}"{{ end }}{{ end }}')
 export CLUSTER_SERVER=$(kubectl config view --raw -o=go-template='{{range .clusters}}{{if eq .name "'''${CURRENT_CLUSTER}'''"}}{{ .cluster.server }}{{end}}{{ end }}')
+# Modify this value for each data plane
 export DATA_PLANE_SYMBOL="a"
 
 cat << EOF > {k8s-name}-kubeconfig
diff --git a/markdown-pages/en/tidb-in-kubernetes/master/restore-from-ebs-snapshot-across-multiple-kubernetes.md b/markdown-pages/en/tidb-in-kubernetes/master/restore-from-ebs-snapshot-across-multiple-kubernetes.md
index 610ad7e16..022afd6e1 100644
--- a/markdown-pages/en/tidb-in-kubernetes/master/restore-from-ebs-snapshot-across-multiple-kubernetes.md
+++ b/markdown-pages/en/tidb-in-kubernetes/master/restore-from-ebs-snapshot-across-multiple-kubernetes.md
@@ -30,8 +30,9 @@ Before restoring a TiDB cluster across multiple Kubernetes clusters from EBS vol
 
 > **Note:**
 >
-> The EBS volume restored from snapshot may have high latency before it's initialized, which can result in big performance hit of restored TiDB cluster. See details in [ebs create volume from snapshot](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html#ebs-create-volume-from-snapshot).
-> So we recommend that you can configure `spec.template.warmup: sync` to initialize TiKV volumes automatically during restoration process.
+> An EBS volume restored from a snapshot might have high latency before it is initialized, which can impact the performance of the restored TiDB cluster. For details, see [Create a volume from a snapshot](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html#ebs-create-volume-from-snapshot).
+>
+> It is recommended that you configure `spec.template.warmup: sync` to initialize TiKV volumes automatically during the restoration process.
 
 ## Restore process
 
@@ -215,7 +216,7 @@ spec:
     toolImage: ${br-image}
     serviceAccount: tidb-backup-manager
     warmup: sync
-    warmupImage: ${wamrup-image}
+    warmupImage: ${warmup-image}
 ```
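
Beyond the patch itself, the prerequisite described in the first file (separating the WAL and raft log onto a small dedicated volume) can be made concrete. The following is a minimal sketch, not part of this PR: the volume name, size, mount path, and the pairing of `storageVolumes` with a `raft-engine` directory are illustrative assumptions about one way to express this in a `TidbCluster` spec.

```yaml
# A hedged sketch, not from this patch: put the raft log on its own small EBS
# volume so that, after a snapshot restore, only this small volume needs full
# initialization to recover write performance. Name, size, and paths are
# assumptions.
spec:
  tikv:
    config: |
      [raft-engine]
      # Assumed directory; it must live on the dedicated volume mounted below.
      dir = "/var/lib/raft/raft-engine"
    storageVolumes:
    - name: raft          # assumed volume name
      storageSize: "50Gi" # assumed size; small relative to the data volume
      mountPath: "/var/lib/raft"
```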
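Likewise, here is a sketch of where the `warmup` fields touched by the last hunk sit in a full custom resource. Only `toolImage`, `serviceAccount`, `warmup`, and `warmupImage` appear in the patch; the `apiVersion`, `kind`, metadata name, and the empty `clusters` list are assumptions for illustration.

```yaml
# A hedged sketch, not from this patch: the warmup settings in the context of
# a VolumeRestore CR. Fields outside the four shown in the hunk are assumed.
apiVersion: federation.pingcap.com/v1alpha1
kind: VolumeRestore
metadata:
  name: restore-example     # assumed name
spec:
  clusters: []              # per-data-plane entries omitted in this sketch
  template:
    toolImage: ${br-image}
    serviceAccount: tidb-backup-manager
    warmup: sync            # initialize TiKV volumes during the restore
    warmupImage: ${warmup-image}
```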