From 9a73329aa4b14459bb5766fccc2ac22170d94280 Mon Sep 17 00:00:00 2001
From: xiaojingchen
Date: Wed, 3 Jul 2019 17:39:14 +0800
Subject: [PATCH] add storeLabels setting (#527)

---
 .../templates/config/_pd-config.tpl          |  4 +++
 .../tidb-cluster/templates/tidb-cluster.yaml | 10 ++++++
 charts/tidb-cluster/values.yaml              | 10 ++++++
 pkg/apis/pingcap.com/v1alpha1/types.go       |  1 +
 .../v1alpha1/zz_generated.deepcopy.go        |  5 +++
 pkg/manager/member/tikv_member_manager.go    | 34 +++++++++----------
 .../member/tikv_member_manager_test.go       |  1 +
 tests/actions.go                             | 31 +++++++++++++++++
 tests/cmd/e2e/main.go                        | 13 ++++---
 tests/cmd/stability/stability.go             |  3 +-
 tests/util.go                                | 18 +++++++---
 11 files changed, 103 insertions(+), 27 deletions(-)

diff --git a/charts/tidb-cluster/templates/config/_pd-config.tpl b/charts/tidb-cluster/templates/config/_pd-config.tpl
index ea5d044f5a..9a644e9b08 100644
--- a/charts/tidb-cluster/templates/config/_pd-config.tpl
+++ b/charts/tidb-cluster/templates/config/_pd-config.tpl
@@ -82,7 +82,11 @@ max-replicas = {{ .Values.pd.maxReplicas }}
 # The placement priorities is implied by the order of label keys.
 # For example, ["zone", "rack"] means that we should place replicas to
 # different zones first, then to different racks if we don't have enough zones.
+{{- if .Values.tikv.storeLabels }}
+location-labels = {{ toJson .Values.tikv.storeLabels }}
+{{- else }}
 location-labels = ["region", "zone", "rack", "host"]
+{{- end }}
 
 [label-property]
 # Do not assign region leaders to stores that have these tags.
diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml
index a2d94ea355..7e6484a368 100644
--- a/charts/tidb-cluster/templates/tidb-cluster.yaml
+++ b/charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -67,6 +67,16 @@ spec:
     annotations:
 {{ toYaml .Values.tikv.annotations | indent 6 }}
   {{- end }}
+  {{- if .Values.tikv.storeLabels }}
+    storeLabels:
+{{ toYaml .Values.tikv.storeLabels | indent 4 }}
+  {{- else }}
+    storeLabels:
+    - region
+    - zone
+    - rack
+    - host
+  {{- end }}
   tidb:
     replicas: {{ .Values.tidb.replicas }}
     image: {{ .Values.tidb.image }}
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index 2c8144ad0b..4962618226 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -192,6 +192,16 @@ tikv:
   #   effect: "NoSchedule"
   annotations: {}
+  ## storeLabels is used to define store label keys
+  ## The label keys specify the location of a store.
+  ## To use the location awareness feature of TiKV, users have to label their k8s nodes with the same labels.
+  ## Note: labels that contain "/" are currently not supported.
+  ## The placement priorities are implied by the order of label keys.
+  ## For example, ["zone", "rack"] means that we should place replicas to
+  ## different zones first, then to different racks if we don't have enough zones.
+  ## The default value is ["region", "zone", "rack", "host"].
+  ## storeLabels: ["region", "zone", "rack", "host"]
+
   # block-cache used to cache uncompressed blocks, big block-cache can speed up read.
   # in normal cases should tune to 30%-50% tikv.resources.limits.memory
   # defaultcfBlockCacheSize: "1GB"
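Note: to make the chart changes above concrete, the sketch below shows a hypothetical values.yaml override and, as comments, roughly what the two templates would render from it. The label keys and values here are illustrative only and are not part of the patch.

# values.yaml override (hypothetical)
tikv:
  storeLabels:
  - zone
  - host

# _pd-config.tpl then takes the toJson branch and renders roughly:
#   location-labels = ["zone","host"]
# tidb-cluster.yaml takes the toYaml branch and renders roughly:
#   tikv:
#     storeLabels:
#     - zone
#     - host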
diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go
index a539b4c123..402ceb449b 100644
--- a/pkg/apis/pingcap.com/v1alpha1/types.go
+++ b/pkg/apis/pingcap.com/v1alpha1/types.go
@@ -144,6 +144,7 @@ type TiKVSpec struct {
 	StorageClassName string              `json:"storageClassName,omitempty"`
 	Tolerations      []corev1.Toleration `json:"tolerations,omitempty"`
 	Annotations      map[string]string   `json:"annotations,omitempty"`
+	StoreLabels      []string            `json:"storeLabels,omitempty"`
 }
 
 // TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
diff --git a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
index 1bf3b6d605..df77c36db6 100644
--- a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
@@ -388,6 +388,11 @@ func (in *TiKVSpec) DeepCopyInto(out *TiKVSpec) {
 			(*out)[key] = val
 		}
 	}
+	if in.StoreLabels != nil {
+		in, out := &in.StoreLabels, &out.StoreLabels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index ebc5cf7712..ac01f5f8cc 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -537,8 +537,8 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
 		}
 
 		nodeName := pod.Spec.NodeName
-		ls, err := tkmm.getNodeLabels(nodeName)
-		if err != nil {
+		ls, err := tkmm.getNodeLabels(nodeName, tc.Spec.TiKV.StoreLabels)
+		if err != nil || len(ls) == 0 {
 			glog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
 			continue
 		}
@@ -559,28 +559,28 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
 	return setCount, nil
 }
 
-func (tkmm *tikvMemberManager) getNodeLabels(nodeName string) (map[string]string, error) {
+func (tkmm *tikvMemberManager) getNodeLabels(nodeName string, storeLabels []string) (map[string]string, error) {
 	node, err := tkmm.nodeLister.Get(nodeName)
 	if err != nil {
 		return nil, err
 	}
-	if ls := node.GetLabels(); ls != nil {
-		labels := map[string]string{}
-		if region, found := ls["region"]; found {
-			labels["region"] = region
-		}
-		if zone, found := ls["zone"]; found {
-			labels["zone"] = zone
-		}
-		if rack, found := ls["rack"]; found {
-			labels["rack"] = rack
+	labels := map[string]string{}
+	ls := node.GetLabels()
+	for _, storeLabel := range storeLabels {
+		if value, found := ls[storeLabel]; found {
+			labels[storeLabel] = value
+			continue
 		}
-		if host, found := ls[apis.LabelHostname]; found {
-			labels["host"] = host
+
+		// TODO: after PD supports store labels containing the slash character, this code should be deleted
+		if storeLabel == "host" {
+			if host, found := ls[apis.LabelHostname]; found {
+				labels[storeLabel] = host
+			}
 		}
-		return labels, nil
 	}
-	return nil, fmt.Errorf("labels not found")
+
+	return labels, nil
 }
 
 // storeLabelsEqualNodeLabels compares store labels with node labels
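Note: with the default storeLabels of ["region", "zone", "rack", "host"], the new getNodeLabels logic copies matching labels from the Kubernetes node running each TiKV Pod onto the corresponding store in PD; "host" falls back to the node's kubernetes.io/hostname label until PD accepts label keys containing "/". A hypothetical example (node name and label values are illustrative only):

# Labels on the Kubernetes node running the TiKV Pod (hypothetical):
region: us-west-1
zone: us-west-1a
rack: r03
kubernetes.io/hostname: kube-node-7

# Store labels the operator would set on that store via PD:
region: us-west-1
zone: us-west-1a
rack: r03
host: kube-node-7   # taken from kubernetes.io/hostname because no plain "host" node label exists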
diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go
index 27ace7f605..200876e399 100644
--- a/pkg/manager/member/tikv_member_manager_test.go
+++ b/pkg/manager/member/tikv_member_manager_test.go
@@ -488,6 +488,7 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) {
 	}
 	testFn := func(test *testcase, t *testing.T) {
 		tc := newTidbClusterForPD()
+		tc.Spec.TiKV.StoreLabels = []string{"region", "zone", "rack"}
 		pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiKVMemberManager(tc)
 
 		if test.errWhenGetStores {
diff --git a/tests/actions.go b/tests/actions.go
index b57ae3da15..d10a689d33 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -256,6 +256,7 @@ type TidbClusterConfig struct {
 	BlockWriteConfig blockwriter.Config
 	GrafanaClient    *metrics.Client
 	SubValues        string
+	TopologyKey      string
 }
 
 func (tc *TidbClusterConfig) String() string {
@@ -708,6 +709,13 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
 		return false, nil
 	}
 
+	glog.V(4).Infof("check store labels")
+	if b, err := oa.storeLabelsIsSet(tc, info.TopologyKey); !b && err == nil {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+
 	glog.V(4).Infof("check tidb cluster begin passwordIsSet")
 	if b, err := oa.passwordIsSet(info); !b && err == nil {
 		return false, nil
@@ -1367,6 +1375,29 @@ func (oa *operatorActions) schedulerHAFn(tc *v1alpha1.TidbCluster) (bool, error)
 	return true, nil
 }
 
+func (oa *operatorActions) storeLabelsIsSet(tc *v1alpha1.TidbCluster, topologyKey string) (bool, error) {
+	pdCli := oa.pdControl.GetPDClient(tc)
+	for _, store := range tc.Status.TiKV.Stores {
+		storeID, err := strconv.ParseUint(store.ID, 10, 64)
+		if err != nil {
+			return false, err
+		}
+		storeInfo, err := pdCli.GetStore(storeID)
+		if err != nil {
+			return false, nil
+		}
+		if len(storeInfo.Store.Labels) == 0 {
+			return false, nil
+		}
+		for _, label := range storeInfo.Store.Labels {
+			if label.Key != topologyKey {
+				return false, nil
+			}
+		}
+	}
+	return true, nil
+}
+
 func (oa *operatorActions) passwordIsSet(clusterInfo *TidbClusterConfig) (bool, error) {
 	ns := clusterInfo.Namespace
 	tcName := clusterInfo.ClusterName
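Note: the new storeLabelsIsSet check polls PD for every TiKV store recorded in the cluster status and only passes once each store reports at least one label and every reported label key matches the test's topology key. With topologyKey "rack", a passing store would report something like the following (values are hypothetical; the shape mirrors the store info returned by pdCli.GetStore):

store:
  id: 1
  labels:
  - key: rack
    value: rack-1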
diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go
index cdd0e69041..07e8f97fa9 100644
--- a/tests/cmd/e2e/main.go
+++ b/tests/cmd/e2e/main.go
@@ -73,6 +73,7 @@ func main() {
 	name1 := "e2e-cluster1"
 	name2 := "e2e-cluster2"
 	name3 := "e2e-pd-replicas-1"
+	topologyKey := "rack"
 	clusterInfos := []*tests.TidbClusterConfig{
 		{
 			Namespace: name1,
@@ -110,7 +111,8 @@ func main() {
 				BatchSize: 1,
 				RawSize:   1,
 			},
-			SubValues: tests.GetAffinityConfigOrDie(name1, name1),
+			TopologyKey:            topologyKey,
+			SubValues:              fmt.Sprintf("%s", tests.GetAffinityConfigOrDie(name1, name1, topologyKey, []string{topologyKey})),
 			EnableConfigMapRollout: true,
 			PDMaxReplicas:          3,
 			TiKVGrpcConcurrency:    4,
@@ -153,7 +155,8 @@ func main() {
 				BatchSize: 1,
 				RawSize:   1,
 			},
-			SubValues: tests.GetAffinityConfigOrDie(name2, name2),
+			TopologyKey:            topologyKey,
+			SubValues:              fmt.Sprintf("%s", tests.GetAffinityConfigOrDie(name2, name2, topologyKey, []string{topologyKey})),
 			EnableConfigMapRollout: false,
 			PDMaxReplicas:          3,
 			TiKVGrpcConcurrency:    4,
@@ -176,7 +179,9 @@ func main() {
 				"pd.replicas":     "1",
 				"discovery.image": conf.OperatorImage,
 			},
-			SubValues: tests.GetAffinityConfigOrDie(name3, name2),
+
+			TopologyKey: topologyKey,
+			SubValues:   fmt.Sprintf("%s", tests.GetAffinityConfigOrDie(name3, name2, topologyKey, []string{topologyKey})),
 		},
 	}
@@ -335,7 +340,7 @@ func main() {
 	restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-other"
 	restoreClusterInfo.InitSecretName = fmt.Sprintf("%s-set-secret", restoreClusterInfo.ClusterName)
 	restoreClusterInfo.BackupSecretName = fmt.Sprintf("%s-backup-secret", restoreClusterInfo.ClusterName)
-	restoreClusterInfo.SubValues = tests.GetAffinityConfigOrDie(restoreClusterInfo.ClusterName, restoreClusterInfo.Namespace)
+	restoreClusterInfo.SubValues = fmt.Sprintf("%s", tests.GetAffinityConfigOrDie(restoreClusterInfo.ClusterName, restoreClusterInfo.Namespace, topologyKey, []string{topologyKey}))
 
 	if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil {
 		glog.Fatal(err)
diff --git a/tests/cmd/stability/stability.go b/tests/cmd/stability/stability.go
index 27cf1f5893..37ae08a79e 100644
--- a/tests/cmd/stability/stability.go
+++ b/tests/cmd/stability/stability.go
@@ -28,6 +28,7 @@ func newOperatorConfig() *tests.OperatorConfig {
 
 func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig {
 	tidbVersion := cfg.GetTiDBVersionOrDie()
+	topologyKey := "rack"
 	return &tests.TidbClusterConfig{
 		Namespace:   ns,
 		ClusterName: clusterName,
@@ -69,6 +70,6 @@ func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig {
 		TiKVGrpcConcurrency: 4,
 		TiDBTokenLimit:      1000,
 		PDLogLevel:          "info",
-		SubValues:           tests.GetAffinityConfigOrDie(clusterName, ns),
+		SubValues:           tests.GetAffinityConfigOrDie(clusterName, ns, topologyKey, []string{topologyKey}),
 	}
 }
diff --git a/tests/util.go b/tests/util.go
index 1d61483199..adc05bd684 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -89,6 +89,12 @@ func GetPodsByLabels(kubeCli kubernetes.Interface, node string, lables map[strin
 }
 
 var affinityTemp string = `{{.Kind}}:
+{{ $length := len .StoreLabels}} {{ if or (not .StoreLabels) (eq $length 0)}}
+{{else if eq .Kind "tikv"}}
+  storeLabels:
+{{range .StoreLabels}} - {{.}}
+{{end}}
+{{end}}
   affinity:
     podAntiAffinity:
       preferredDuringSchedulingIgnoredDuringExecution:
@@ -98,7 +104,7 @@ var affinityTemp string = `{{.Kind}}:
             matchLabels:
               app.kubernetes.io/instance: {{.ClusterName}}
               app.kubernetes.io/component: {{.Kind}}
-          topologyKey: "rack"
+          topologyKey: {{.TopologyKey}}
           namespaces:
           - {{.Namespace}}
 `
@@ -108,26 +114,28 @@ type AffinityInfo struct {
 	Kind        string
 	Weight      int
 	Namespace   string
+	TopologyKey string
+	StoreLabels []string
 }
 
-func GetAffinityConfigOrDie(clusterName, namespace string) string {
+func GetAffinityConfigOrDie(clusterName, namespace, topologyKey string, storeLabels []string) string {
 	temp, err := template.New("dt-affinity").Parse(affinityTemp)
 	if err != nil {
 		slack.NotifyAndPanic(err)
 	}
 	pdbuff := new(bytes.Buffer)
-	err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace})
+	err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels})
 	if err != nil {
 		slack.NotifyAndPanic(err)
 	}
 	tikvbuff := new(bytes.Buffer)
-	err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace})
+	err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels})
 	if err != nil {
 		slack.NotifyAndPanic(err)
 	}
 	tidbbuff := new(bytes.Buffer)
-	err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace})
+	err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels})
 	if err != nil {
 		slack.NotifyAndPanic(err)
 	}
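Note: for orientation, with topologyKey "rack" and storeLabels ["rack"], the tikv section that GetAffinityConfigOrDie now renders would look roughly like the sketch below (cluster name and namespace are hypothetical, and the blank lines emitted by the template actions are omitted); the pd and tidb sections are identical except that they carry no storeLabels:

tikv:
  storeLabels:
  - rack
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/instance: demo-cluster
              app.kubernetes.io/component: tikv
          topologyKey: rack
          namespaces:
          - demo-ns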