From e7531906c661da14cbd01aa13f7c7e1a1372c71c Mon Sep 17 00:00:00 2001 From: husharp Date: Fri, 23 Feb 2024 15:32:04 +0800 Subject: [PATCH 1/4] add test Signed-off-by: husharp --- .../member/pd_ms_member_manager_test.go | 94 ++--- pkg/manager/member/pd_ms_scaler.go | 1 - pkg/manager/member/pd_ms_scaler_test.go | 181 +++++++++ pkg/manager/member/pd_ms_upgrader.go | 17 +- pkg/manager/member/pd_ms_upgrader_test.go | 370 ++++++++++++++++++ pkg/manager/member/pd_upgrader_test.go | 4 +- 6 files changed, 612 insertions(+), 55 deletions(-) create mode 100644 pkg/manager/member/pd_ms_scaler_test.go create mode 100644 pkg/manager/member/pd_ms_upgrader_test.go diff --git a/pkg/manager/member/pd_ms_member_manager_test.go b/pkg/manager/member/pd_ms_member_manager_test.go index 2a74934ee20..e2d7ec7e379 100644 --- a/pkg/manager/member/pd_ms_member_manager_test.go +++ b/pkg/manager/member/pd_ms_member_manager_test.go @@ -79,8 +79,8 @@ func TestPDMSMemberManagerSyncCreate(t *testing.T) { test.errExpectFn(g, err) g.Expect(tc.Spec).To(Equal(oldSpec)) - svc1, err := pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, "tso")) - eps1, eperr := pmm.deps.EndpointLister.Endpoints(ns).Get(controller.PDMSMemberName(tcName, "tso")) + svc1, err := pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, tsoService)) + eps1, eperr := pmm.deps.EndpointLister.Endpoints(ns).Get(controller.PDMSMemberName(tcName, tsoService)) if test.pdSvcCreated { g.Expect(err).NotTo(HaveOccurred()) g.Expect(svc1).NotTo(Equal(nil)) @@ -91,7 +91,7 @@ func TestPDMSMemberManagerSyncCreate(t *testing.T) { expectErrIsNotFound(g, eperr) } - tc1, err := pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, "tso")) + tc1, err := pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, tsoService)) if test.setCreated { g.Expect(err).NotTo(HaveOccurred()) g.Expect(tc1).NotTo(Equal(nil)) @@ -152,18 +152,18 @@ func TestPDMSMemberManagerSyncUpdate(t *testing.T) { pdClient := controller.NewFakePDClient(fakePDControl, tc) pdClient.AddReaction(pdapi.GetPDMSMembersActionType, func(action *pdapi.Action) (interface{}, error) { - return []string{"tso"}, nil + return []string{tsoService}, nil }) err := pmm.Sync(tc) g.Expect(controller.IsRequeueError(err)).To(BeTrue()) - _, err = pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, "tso")) + _, err = pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, tsoService)) g.Expect(err).NotTo(HaveOccurred()) - _, err = pmm.deps.EndpointLister.Endpoints(ns).Get(controller.PDMSMemberName(tcName, "tso")) + _, err = pmm.deps.EndpointLister.Endpoints(ns).Get(controller.PDMSMemberName(tcName, tsoService)) g.Expect(err).NotTo(HaveOccurred()) - _, err = pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, "tso")) + _, err = pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, tsoService)) g.Expect(err).NotTo(HaveOccurred()) tc1 := tc.DeepCopy() @@ -187,11 +187,11 @@ func TestPDMSMemberManagerSyncUpdate(t *testing.T) { } if test.expectPDServiceFn != nil { - svc, err := pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, "tso")) + svc, err := pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, tsoService)) test.expectPDServiceFn(g, svc, err) } if test.expectStatefulSetFn != nil { - set, err := pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, "tso")) + set, err := 
pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, tsoService)) test.expectStatefulSetFn(g, set, err) } if test.expectTidbClusterFn != nil { @@ -208,12 +208,12 @@ func TestPDMSMemberManagerSyncUpdate(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, - Name: "tso", + Name: tsoService, Replicas: 5, }, } tc.Spec.Services = []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeNodePort)}, + {Name: tsoService, Type: string(corev1.ServiceTypeNodePort)}, } }, errWhenUpdateStatefulSet: false, @@ -229,7 +229,7 @@ func TestPDMSMemberManagerSyncUpdate(t *testing.T) { // g.Expect(int(*set.Spec.Replicas)).To(Equal(4)) }, expectTidbClusterFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { - g.Expect(tc.Status.PDMS["tso"].Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.ScalePhase)) }, }, { @@ -240,7 +240,7 @@ func TestPDMSMemberManagerSyncUpdate(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, - Name: "tso", + Name: tsoService, Replicas: 5, }, } @@ -296,15 +296,15 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { fakePDControl := pmm.deps.PDControl.(*pdapi.FakePDControl) pdClient := controller.NewFakePDClient(fakePDControl, tc) pdClient.AddReaction(pdapi.GetPDMSMembersActionType, func(action *pdapi.Action) (interface{}, error) { - return []string{"tso"}, nil + return []string{tsoService}, nil }) err := pmm.Sync(tc) g.Expect(controller.IsRequeueError(err)).To(BeTrue()) - _, err = pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, "tso")) + _, err = pmm.deps.ServiceLister.Services(ns).Get(controller.PDMSMemberName(tcName, tsoService)) g.Expect(err).NotTo(HaveOccurred()) - _, err = pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, "tso")) + _, err = pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, tsoService)) g.Expect(err).NotTo(HaveOccurred()) test.modify(tc) @@ -316,7 +316,7 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { } if test.expectStatefulSetFn != nil { - set, err := pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, "tso")) + set, err := pmm.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, tsoService)) test.expectStatefulSetFn(g, set, err) println("set.Spec.Template.Spec.Containers[0].Image", set.Spec.Template.Spec.Containers[0].Image) } @@ -334,7 +334,7 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, - Name: "tso", + Name: tsoService, Replicas: 1, }, } @@ -346,7 +346,7 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { g.Expect(*set.Spec.Replicas).To(Equal(int32(2))) }, expectTidbClusterFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { - g.Expect(tc.Status.PDMS["tso"].Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.ScalePhase)) }, }, { @@ -356,7 +356,7 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { tc.Spec.PDMS = []*v1alpha1.PDMSSpec{ { ComponentSpec: v1alpha1.ComponentSpec{}, - Name: "tso", + Name: tsoService, Replicas: 1, }, } @@ -368,7 +368,7 @@ func TestPDMSMemberManagerSyncPDMSSts(t *testing.T) { g.Expect(*set.Spec.Replicas).To(Equal(int32(2))) }, expectTidbClusterFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { - g.Expect(tc.Status.PDMS["tso"].Phase).To(Equal(v1alpha1.ScalePhase)) + 
g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.ScalePhase)) }, }, } @@ -401,7 +401,7 @@ func newTidbClusterForPDMS() *v1alpha1.TidbCluster { }, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, @@ -483,7 +483,7 @@ func TestGetNewPDMSHeadlessServiceForTidbCluster(t *testing.T) { for i := range tests { tt := tests[i] t.Run(tt.name, func(t *testing.T) { - svc := getNewPDMSHeadlessService(&tt.tc, "tso") + svc := getNewPDMSHeadlessService(&tt.tc, tsoService) if diff := cmp.Diff(tt.expected, *svc); diff != "" { t.Errorf("unexpected Service (-want, +got): %s", diff) } @@ -534,7 +534,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, - Name: "tso"}}, + Name: tsoService}}, }, }, testSts: testHostNetwork(t, false, ""), @@ -557,7 +557,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiDB: &v1alpha1.TiDBSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", Env: []corev1.EnvVar{ @@ -647,7 +647,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiDB: &v1alpha1.TiDBSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", AdditionalContainers: []corev1.Container{customSideCarContainers[0]}, @@ -676,7 +676,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", PodSecurityContext: &corev1.PodSecurityContext{ @@ -749,7 +749,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", Annotations: map[string]string{ @@ -795,7 +795,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, - Name: "tso"}}, + Name: tsoService}}, }, }, testSts: func(sts *apps.StatefulSet) { @@ -822,7 +822,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", Annotations: map[string]string{ @@ -894,7 +894,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", Annotations: map[string]string{ @@ -966,7 +966,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", Annotations: map[string]string{ @@ -1044,7 +1044,7 @@ func TestGetNewPDMSSetForTidbCluster(t *testing.T) { ComponentSpec: v1alpha1.ComponentSpec{ Image: "pingcap/pd:v7.3.0", }, - Name: "tso"}}, + Name: tsoService}}, }, }, testSts: func(sts *apps.StatefulSet) { @@ -1092,7 +1092,7 @@ func TestGetPDMSConfigMap(t *testing.T) { }, TiDB: &v1alpha1.TiDBSpec{}, TiKV: &v1alpha1.TiKVSpec{}, - PDMS: []*v1alpha1.PDMSSpec{{Name: "tso"}}, + PDMS: []*v1alpha1.PDMSSpec{{Name: tsoService}}, }, }, expected: &corev1.ConfigMap{ @@ -1144,7 +1144,7 @@ func TestGetPDMSConfigMap(t 
*testing.T) { TiDB: &v1alpha1.TiDBSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ ConfigUpdateStrategy: &updateStrategy, }, @@ -1217,7 +1217,7 @@ func TestGetPDMSConfigMap(t *testing.T) { }, TiDB: &v1alpha1.TiDBSpec{}, TiKV: &v1alpha1.TiKVSpec{}, - PDMS: []*v1alpha1.PDMSSpec{{Name: "tso"}}, + PDMS: []*v1alpha1.PDMSSpec{{Name: tsoService}}, }, }, expected: &corev1.ConfigMap{ @@ -1274,7 +1274,7 @@ func TestGetPDMSConfigMap(t *testing.T) { TiDB: &v1alpha1.TiDBSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, ComponentSpec: v1alpha1.ComponentSpec{ ConfigUpdateStrategy: &updateStrategy, }, @@ -1372,7 +1372,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { Spec: v1alpha1.TidbClusterSpec{ Services: []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeClusterIP)}, + {Name: tsoService, Type: string(corev1.ServiceTypeClusterIP)}, }, PD: &v1alpha1.PDSpec{ @@ -1387,7 +1387,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, }, TiKV: &v1alpha1.TiKVSpec{}, - PDMS: []*v1alpha1.PDMSSpec{{Name: "tso"}}, + PDMS: []*v1alpha1.PDMSSpec{{Name: tsoService}}, }, }, expected: corev1.Service{ @@ -1444,7 +1444,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ Services: []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeClusterIP)}, + {Name: tsoService, Type: string(corev1.ServiceTypeClusterIP)}, }, PD: &v1alpha1.PDSpec{ ComponentSpec: v1alpha1.ComponentSpec{ @@ -1460,7 +1460,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, Service: &v1alpha1.ServiceSpec{ClusterIP: pointer.StringPtr("172.20.10.1")}, }, }, @@ -1521,7 +1521,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ Services: []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeLoadBalancer)}, + {Name: tsoService, Type: string(corev1.ServiceTypeLoadBalancer)}, }, PD: &v1alpha1.PDSpec{ ComponentSpec: v1alpha1.ComponentSpec{ @@ -1537,7 +1537,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, Service: &v1alpha1.ServiceSpec{LoadBalancerIP: pointer.StringPtr("172.20.10.1")}, }, }, @@ -1598,7 +1598,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ Services: []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeLoadBalancer)}, + {Name: tsoService, Type: string(corev1.ServiceTypeLoadBalancer)}, }, TiDB: &v1alpha1.TiDBSpec{ TLSClient: &v1alpha1.TiDBTLSClient{ @@ -1614,7 +1614,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, Service: &v1alpha1.ServiceSpec{Type: corev1.ServiceTypeClusterIP, ClusterIP: pointer.StringPtr("172.20.10.1")}, }, @@ -1676,7 +1676,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ Services: []v1alpha1.Service{ - {Name: "tso", Type: string(corev1.ServiceTypeLoadBalancer)}, + {Name: tsoService, Type: string(corev1.ServiceTypeLoadBalancer)}, }, PD: &v1alpha1.PDSpec{ ComponentSpec: v1alpha1.ComponentSpec{ @@ -1692,7 +1692,7 @@ func TestGetNewPdMSServiceForTidbCluster(t *testing.T) { TiKV: &v1alpha1.TiKVSpec{}, PDMS: []*v1alpha1.PDMSSpec{ { - Name: "tso", + Name: tsoService, Service: &v1alpha1.ServiceSpec{Type: 
corev1.ServiceTypeClusterIP, ClusterIP: pointer.StringPtr("172.20.10.1"), PortName: pointer.StringPtr("http-tso"), diff --git a/pkg/manager/member/pd_ms_scaler.go b/pkg/manager/member/pd_ms_scaler.go index 8f707d46991..90b6b5dfe6f 100644 --- a/pkg/manager/member/pd_ms_scaler.go +++ b/pkg/manager/member/pd_ms_scaler.go @@ -56,7 +56,6 @@ func (s *pdMSScaler) ScaleOut(meta metav1.Object, oldSet *apps.StatefulSet, newS ns := tc.GetNamespace() tcName := tc.GetName() serviceName := controller.PDMSTrimName(oldSet.Name) - println("scaleOne", serviceName) klog.Infof("scaling out PDMS component %s for cluster [%s/%s] statefulset, ordinal: %d (replicas: %d, delete slots: %v)", serviceName, oldSet.Namespace, tcName, ordinal, replicas, deleteSlots.List()) if !tc.Status.PDMS[serviceName].Synced { diff --git a/pkg/manager/member/pd_ms_scaler_test.go b/pkg/manager/member/pd_ms_scaler_test.go new file mode 100644 index 00000000000..a7cc5dac68d --- /dev/null +++ b/pkg/manager/member/pd_ms_scaler_test.go @@ -0,0 +1,181 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "fmt" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestPDMSScaler(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + hasSynced bool + errExpectFn func(*GomegaWithT, error) + changed bool + scaleIn bool + } + + testFn := func(test testcase, t *testing.T) { + tc := newTidbClusterForPDMSScaler() + + if test.hasSynced { + tc.Status.PDMS[tsoService].Synced = true + } else { + tc.Status.PDMS[tsoService].Synced = false + } + + // default replicas is 5 + oldSet := newStatefulSetForPDMSScale() + oldSet.Name = fmt.Sprintf("%s-pdms-%s", tc.Name, tsoService) + newSet := oldSet.DeepCopy() + + scaler := newFakePDMSScaler() + if test.scaleIn { + newSet.Spec.Replicas = pointer.Int32Ptr(3) + err := scaler.ScaleIn(tc, oldSet, newSet) + test.errExpectFn(g, err) + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(4)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } else { + newSet.Spec.Replicas = pointer.Int32Ptr(7) + err := scaler.ScaleOut(tc, oldSet, newSet) + test.errExpectFn(g, err) + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(6)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } + } + + tests := []testcase{ + { + name: "scaleIn normal", + scaleIn: true, + hasSynced: true, + errExpectFn: errExpectNil, + changed: true, + }, + { + name: "scaleIn pdms is upgrading", + scaleIn: true, + hasSynced: false, + errExpectFn: errExpectNotNil, + changed: false, + }, + { + name: "scaleOut normal", + scaleIn: false, + hasSynced: true, + errExpectFn: errExpectNil, + changed: true, + }, + { + name: "scaleOut pdms is upgrading", + scaleIn: false, + hasSynced: false, + errExpectFn: errExpectNotNil, + changed: 
false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			testFn(tt, t)
+		})
+	}
+}
+
+func newTidbClusterForPDMSScaler() *v1alpha1.TidbCluster {
+	return &v1alpha1.TidbCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "TidbCluster",
+			APIVersion: "pingcap.com/v1alpha1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: corev1.NamespaceDefault,
+		},
+		Spec: v1alpha1.TidbClusterSpec{
+			PD: &v1alpha1.PDSpec{
+				ComponentSpec: v1alpha1.ComponentSpec{
+					Image: "pingcap/pd:v7.3.0",
+				},
+				Replicas:         1,
+				StorageClassName: pointer.StringPtr("my-storage-class"),
+				Mode:             "ms",
+			},
+			PDMS: []*v1alpha1.PDMSSpec{
+				{
+					Name: tsoService,
+					ComponentSpec: v1alpha1.ComponentSpec{
+						Image: "pd-test-image",
+					},
+					Replicas:         3,
+					StorageClassName: pointer.StringPtr("my-storage-class"),
+				},
+			},
+		},
+		Status: v1alpha1.TidbClusterStatus{
+			PDMS: map[string]*v1alpha1.PDMSStatus{
+				tsoService: {
+					Phase: v1alpha1.NormalPhase,
+					StatefulSet: &apps.StatefulSetStatus{
+						CurrentRevision: "1",
+						UpdateRevision:  "2",
+						ReadyReplicas:   3,
+						Replicas:        3,
+						CurrentReplicas: 2,
+						UpdatedReplicas: 1,
+					},
+				},
+			},
+		},
+	}
+}
+
+func newFakePDMSScaler(resyncDuration ...time.Duration) *pdMSScaler {
+	fakeDeps := controller.NewFakeDependencies()
+	if len(resyncDuration) > 0 {
+		fakeDeps.CLIConfig.ResyncDuration = resyncDuration[0]
+	}
+	return &pdMSScaler{generalScaler{deps: fakeDeps}}
+}
+
+func newStatefulSetForPDMSScale() *apps.StatefulSet {
+	set := &apps.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "scaler",
+			Namespace: metav1.NamespaceDefault,
+		},
+		Spec: apps.StatefulSetSpec{
+			Replicas: pointer.Int32Ptr(5),
+		},
+	}
+	return set
+}
diff --git a/pkg/manager/member/pd_ms_upgrader.go b/pkg/manager/member/pd_ms_upgrader.go
index be712b5069d..b42ed0a95dd 100644
--- a/pkg/manager/member/pd_ms_upgrader.go
+++ b/pkg/manager/member/pd_ms_upgrader.go
@@ -15,13 +15,13 @@ package member
 
 import (
 	"fmt"
+	"strings"
 
 	"github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper"
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/controller"
 	mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils"
 	"github.com/pingcap/tidb-operator/pkg/third_party/k8s"
-
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/klog/v2"
 )
@@ -61,7 +61,7 @@ func (u *pdMSUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.St
 	if oldTrimName != componentName {
 		return fmt.Errorf("tidbcluster: [%s/%s]'s pdMS oldTrimName is %s, not equal to componentName: %s", ns, tcName, oldTrimName, componentName)
 	}
-	klog.Infof("gracefulUpgrade pdMS trim name oldTrimName: %s", oldTrimName)
+	klog.Infof("gracefulUpgrade pdMS trim name, oldTrimName: %s", oldTrimName)
 	if tc.PDMSScaling(oldTrimName) {
 		klog.Infof("TidbCluster: [%s/%s]'s pdMS status is %v, can not upgrade pdMS",
 			ns, tcName, tc.Status.PDMS[componentName].Phase)
@@ -107,14 +107,21 @@
 		if !k8s.IsPodReady(pod) {
 			return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded pdMS pod: [%s] is not ready", ns, tcName, podName)
 		}
+
+		var exist bool
 		for _, member := range tc.Status.PDMS[componentName].Members {
-			if member == podName {
-				continue
+			if strings.Contains(member, podName) {
+				exist = true
 			}
-			return controller.RequeueErrorf("tidbcluster: [%s/%s]'s pdMS upgraded pod: [%s] is not health", ns, tcName, podName)
+		}
+		if !exist {
+			return controller.RequeueErrorf("tidbcluster: [%s/%s]'s pdMS upgraded pod: [%s] does not exist, all
members: %v", + ns, tcName, podName, tc.Status.PDMS[componentName].Members) } continue } + mngerutils.SetUpgradePartition(newSet, i) + return nil } return nil diff --git a/pkg/manager/member/pd_ms_upgrader_test.go b/pkg/manager/member/pd_ms_upgrader_test.go new file mode 100644 index 00000000000..9059b4c58dd --- /dev/null +++ b/pkg/manager/member/pd_ms_upgrader_test.go @@ -0,0 +1,370 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "testing" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/label" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + podinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/utils/pointer" +) + +const tsoService = "tso" + +func TestPDMSUpgraderUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + changeFn func(*v1alpha1.TidbCluster) + changePods func(pods []*corev1.Pod) + changeOldSet func(set *apps.StatefulSet) + errExpectFn func(*GomegaWithT, error) + expectFn func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) + } + + testFn := func(test *testcase) { + t.Log(test.name) + upgrader, podInformer := newPDMSUpgrader() + tc := newTidbClusterForPDMSUpgrader() + + if test.changeFn != nil { + test.changeFn(tc) + } + + pods := getPDMSPods() + if test.changePods != nil { + test.changePods(pods) + } + for i := range pods { + podInformer.Informer().GetIndexer().Add(pods[i]) + } + + newSet := newStatefulSetForPDMSUpgrader() + oldSet := newSet.DeepCopy() + if test.changeOldSet != nil { + test.changeOldSet(oldSet) + } + mngerutils.SetStatefulSetLastAppliedConfigAnnotation(oldSet) + + newSet.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32Ptr(3) + + err := upgrader.Upgrade(tc, oldSet, newSet) + test.errExpectFn(g, err) + test.expectFn(g, tc, newSet) + } + + tests := []testcase{ + { + name: "normal upgrade", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + }, + changePods: nil, + changeOldSet: nil, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(1))) + }, + }, + { + name: "normal upgrade with notReady pod", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + }, + changePods: func(pods []*corev1.Pod) { + for _, pod := range pods { + pod.Status = *new(corev1.PodStatus) + } + }, + changeOldSet: nil, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + 
g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(2))) + }, + }, + { + name: "modify oldSet update strategy to OnDelete", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ + Type: apps.OnDeleteStatefulSetStrategyType, + } + }, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType})) + }, + }, + { + name: "set oldSet's RollingUpdate strategy to nil", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ + Type: apps.RollingUpdateStatefulSetStrategyType, + } + }, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType})) + }, + }, + { + name: "newSet template changed", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.Template.Spec.Containers[0].Image = "pd-test-image:old" + }, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "pdms scaling", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = true + tc.Status.PDMS[tsoService].Phase = v1alpha1.ScalePhase + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.Template.Spec.Containers[0].Image = "pd-test-image:old" + }, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "pdms sync failed", + changeFn: func(tc *v1alpha1.TidbCluster) { + tc.Status.PDMS[tsoService].Synced = false + }, + changePods: nil, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.NormalPhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + + { + name: "ignore pd peers health if annotation is not set", + changeFn: func(tc *v1alpha1.TidbCluster) { + 
tc.Status.PDMS[tsoService].Synced = true + }, + changePods: nil, + changeOldSet: nil, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) { + g.Expect(tc.Status.PDMS[tsoService].Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(1))) + }, + }, + } + + for i := range tests { + testFn(&tests[i]) + } + +} + +func newPDMSUpgrader() (Upgrader, podinformers.PodInformer) { + fakeDeps := controller.NewFakeDependencies() + pdMSUpgrader := &pdMSUpgrader{deps: fakeDeps} + podInformer := fakeDeps.KubeInformerFactory.Core().V1().Pods() + return pdMSUpgrader, podInformer +} + +func newStatefulSetForPDMSUpgrader() *apps.StatefulSet { + return &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.PDMSMemberName(upgradeTcName, tsoService), + Namespace: metav1.NamespaceDefault, + }, + Spec: apps.StatefulSetSpec{ + Replicas: pointer.Int32Ptr(3), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pd", + Image: "pd-test-image", + }, + }, + }, + }, + UpdateStrategy: apps.StatefulSetUpdateStrategy{ + Type: apps.RollingUpdateStatefulSetStrategyType, + RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{Partition: pointer.Int32Ptr(2)}, + }, + }, + Status: apps.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + ReadyReplicas: 3, + Replicas: 3, + CurrentReplicas: 2, + UpdatedReplicas: 1, + }, + } +} + +func newTidbClusterForPDMSUpgrader() *v1alpha1.TidbCluster { + podName0 := PDMSPodName(upgradeTcName, 0, tsoService) + podName1 := PDMSPodName(upgradeTcName, 1, tsoService) + podName2 := PDMSPodName(upgradeTcName, 2, tsoService) + return &v1alpha1.TidbCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "TidbCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: upgradeTcName, + Namespace: corev1.NamespaceDefault, + UID: upgradeTcName, + Labels: label.New().Instance(upgradeInstanceName), + }, + Spec: v1alpha1.TidbClusterSpec{ + PD: &v1alpha1.PDSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + Image: "pingcap/pd:v7.3.0", + }, + Replicas: 1, + StorageClassName: pointer.StringPtr("my-storage-class"), + Mode: "ms", + }, + PDMS: []*v1alpha1.PDMSSpec{ + { + Name: tsoService, + ComponentSpec: v1alpha1.ComponentSpec{ + Image: "pd-test-image", + }, + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + }, + }, + Status: v1alpha1.TidbClusterStatus{ + PDMS: map[string]*v1alpha1.PDMSStatus{ + tsoService: { + Phase: v1alpha1.NormalPhase, + StatefulSet: &apps.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + ReadyReplicas: 3, + Replicas: 3, + CurrentReplicas: 2, + UpdatedReplicas: 1, + }, + Members: []string{podName0, podName1, podName2}, + }, + }, + }, + } +} + +func getPDMSPods() []*corev1.Pod { + lc := label.New().Instance(upgradeInstanceName).PDMS(tsoService).Labels() + lc[apps.ControllerRevisionHashLabelKey] = "1" + lu := label.New().Instance(upgradeInstanceName).PDMS(tsoService).Labels() + lu[apps.ControllerRevisionHashLabelKey] = "2" + pods := []*corev1.Pod{ + { + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: PDMSPodName(upgradeTcName, 0, tsoService), + Namespace: corev1.NamespaceDefault, + Labels: lc, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: 
corev1.ConditionTrue},
+				},
+			},
+		},
+		{
+			TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      PDMSPodName(upgradeTcName, 1, tsoService),
+				Namespace: corev1.NamespaceDefault,
+				Labels:    lc,
+			},
+			Status: corev1.PodStatus{
+				Conditions: []corev1.PodCondition{
+					{
+						Type:   corev1.PodReady,
+						Status: corev1.ConditionTrue},
+				},
+			},
+		},
+		{
+			TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      PDMSPodName(upgradeTcName, 2, tsoService),
+				Namespace: corev1.NamespaceDefault,
+				Labels:    lu,
+			},
+			Status: corev1.PodStatus{
+				Conditions: []corev1.PodCondition{
+					{
+						Type:   corev1.PodReady,
+						Status: corev1.ConditionTrue},
+				},
+			},
+		},
+	}
+	return pods
+}
diff --git a/pkg/manager/member/pd_upgrader_test.go b/pkg/manager/member/pd_upgrader_test.go
index 5eb2008d0c5..f9b80a6c7d7 100644
--- a/pkg/manager/member/pd_upgrader_test.go
+++ b/pkg/manager/member/pd_upgrader_test.go
@@ -66,7 +66,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
 			})
 		}
 
-		pods := getPods()
+		pods := getPDPods()
 		if test.changePods != nil {
 			test.changePods(pods)
 		}
@@ -539,7 +539,7 @@ func newTidbClusterForPDUpgrader() *v1alpha1.TidbCluster {
 	}
 }
 
-func getPods() []*corev1.Pod {
+func getPDPods() []*corev1.Pod {
 	lc := label.New().Instance(upgradeInstanceName).PD().Labels()
 	lc[apps.ControllerRevisionHashLabelKey] = "1"
 	lu := label.New().Instance(upgradeInstanceName).PD().Labels()

From 817ecf163971d98d4e710249d338ee0809083a06 Mon Sep 17 00:00:00 2001
From: husharp
Date: Mon, 26 Feb 2024 16:56:47 +0800
Subject: [PATCH 2/4] remove shutdown pdms

Signed-off-by: husharp
---
 pkg/manager/member/pd_ms_member_manager.go | 27 +++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/pkg/manager/member/pd_ms_member_manager.go b/pkg/manager/member/pd_ms_member_manager.go
index bed7712ddc6..46ae6802d6d 100644
--- a/pkg/manager/member/pd_ms_member_manager.go
+++ b/pkg/manager/member/pd_ms_member_manager.go
@@ -64,10 +64,35 @@ func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
 	}
 
 	// Need to start PD API
-	if tc.Spec.PD == nil || tc.Spec.PD.Mode != "ms" {
+	if tc.Spec.PD == nil {
 		klog.Infof("PD Micro Service is enabled, but PD is not enabled or not in `ms` mode, skip syncing PD Micro Service")
 		return nil
 	}
+	if tc.Spec.PD.Mode != "ms" {
+		// remove all micro service components
+		for _, comp := range tc.Spec.PDMS {
+			ns := tc.GetNamespace()
+			tcName := tc.GetName()
+			curService := comp.Name
+			oldPDMSSetTmp, err := m.deps.StatefulSetLister.StatefulSets(ns).Get(controller.PDMSMemberName(tcName, curService))
+			if errors.IsNotFound(err) {
+				continue // the component's StatefulSet is already gone, nothing to remove
+			} else if err != nil {
+				return fmt.Errorf("syncPDMSStatefulSet: fail to get sts %s PDMS component %s for cluster [%s/%s], error: %s", controller.PDMSMemberName(tcName, curService), curService, ns, tcName, err)
+			}
+
+			oldPDMSSet := oldPDMSSetTmp.DeepCopy()
+			newPDMSSet := oldPDMSSetTmp.DeepCopy()
+			*newPDMSSet.Spec.Replicas = 0
+			if err := m.scaler.Scale(tc, oldPDMSSet, newPDMSSet); err != nil {
+				return err
+			}
+			mngerutils.UpdateStatefulSetWithPrecheck(m.deps, tc, "FailedUpdatePDMSSTS", newPDMSSet, oldPDMSSet)
+		}
+		klog.Infof("PD Micro Service is enabled, but PD is not enabled or not in `ms` mode, skip syncing PD Micro Service")
+		return nil
+	}
+
 	// init PD Micro Service status
 	if tc.Status.PDMS == nil {
 		tc.Status.PDMS = make(map[string]*v1alpha1.PDMSStatus)

From 5261e05eb3a94f9b6aa5101c967d75e711e40e08 Mon Sep 17 00:00:00 2001
From: husharp
Date: Tue, 27 Feb 2024 15:05:22 +0800
Subject: [PATCH 3/4] remove storage size

Signed-off-by: husharp
---
 pkg/manager/member/pd_ms_member_manager.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/pkg/manager/member/pd_ms_member_manager.go b/pkg/manager/member/pd_ms_member_manager.go
index 46ae6802d6d..a1fc5f88e47 100644
--- a/pkg/manager/member/pd_ms_member_manager.go
+++ b/pkg/manager/member/pd_ms_member_manager.go
@@ -723,14 +723,6 @@ func (m *pdMSMemberManager) getNewPDMSStatefulSet(tc *v1alpha1.TidbCluster, cm *
 	}
 	// default in nil
 	if curSpec.StorageVolumes != nil {
-		storageRequest, err := controller.ParseStorageRequest(curSpec.Requests)
-		if err != nil {
-			return nil, fmt.Errorf("cannot parse storage request for PD, tidbcluster %s/%s, error: %v", tc.Namespace, tc.Name, err)
-		}
-		dataVolumeName := string(v1alpha1.GetStorageVolumeName("", v1alpha1.PDMSMemberType(curService)))
-		pdMSSet.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{
-			util.VolumeClaimTemplate(storageRequest, dataVolumeName, tc.Spec.PD.StorageClassName),
-		}
 		pdMSSet.Spec.VolumeClaimTemplates = append(pdMSSet.Spec.VolumeClaimTemplates, additionalPVCs...)
 	}

From 19b953752e26ecd143931f27e3fa4b5c551f483a Mon Sep 17 00:00:00 2001
From: husharp
Date: Wed, 28 Feb 2024 14:58:34 +0800
Subject: [PATCH 4/4] make log accurate

Signed-off-by: husharp
---
 pkg/manager/member/pd_ms_member_manager.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/manager/member/pd_ms_member_manager.go b/pkg/manager/member/pd_ms_member_manager.go
index a1fc5f88e47..fdf8ee48aa0 100644
--- a/pkg/manager/member/pd_ms_member_manager.go
+++ b/pkg/manager/member/pd_ms_member_manager.go
@@ -65,7 +65,7 @@ func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
 
 	// Need to start PD API
 	if tc.Spec.PD == nil {
-		klog.Infof("PD Micro Service is enabled, but PD is not enabled or not in `ms` mode, skip syncing PD Micro Service")
+		klog.Infof("PD Micro Service is enabled, but PD is not enabled, skip syncing PD Micro Service")
 		return nil
 	}
 	if tc.Spec.PD.Mode != "ms" {
@@ -89,7 +89,7 @@ func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
 		}
 		mngerutils.UpdateStatefulSetWithPrecheck(m.deps, tc, "FailedUpdatePDMSSTS", newPDMSSet, oldPDMSSet)
 	}
-	klog.Infof("PD Micro Service is enabled, but PD is not enabled or not in `ms` mode, skip syncing PD Micro Service")
+	klog.Infof("PD Micro Service is enabled, but PD is not in `ms` mode, skip syncing PD Micro Service")
 	return nil
 }
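
Illustration (appended for review; not part of the patch series): patch 1 relaxes the upgrader's membership check from strict equality to strings.Contains, so an upgraded pod counts as registered when any entry in tc.Status.PDMS[componentName].Members merely embeds the pod name, which also covers members reported as full peer addresses rather than bare pod names. Below is a minimal, self-contained Go sketch of that check; the package, the memberExists helper, and the URL-shaped member entry are assumptions for demonstration, not code taken from the operator.

package main

import (
	"fmt"
	"strings"
)

// memberExists mirrors the containment check introduced in pd_ms_upgrader.go:
// it reports whether any PDMS member entry embeds the given pod name.
func memberExists(podName string, members []string) bool {
	for _, m := range members {
		if strings.Contains(m, podName) {
			return true
		}
	}
	return false
}

func main() {
	members := []string{
		"http://demo-tso-0.demo-tso-peer.default.svc:2379", // hypothetical address-shaped entry
		"demo-tso-1", // bare pod name, as used in the unit tests
	}
	fmt.Println(memberExists("demo-tso-0", members)) // true
	fmt.Println(memberExists("demo-tso-2", members)) // false
}

Note that substring matching trades a small risk of false positives (one pod name being a prefix of another, e.g. demo-tso-1 vs demo-tso-10) for tolerance of the differing formats in which member entries may be reported.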