From 71b83fc87b875aabe5d30e6328bcdecd216e0984 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Fri, 10 Apr 2020 14:38:48 +0800 Subject: [PATCH 01/11] add event for tidbcluster and auto-scaler --- pkg/autoscaler/autoscaler/autoscaler_manager.go | 15 +++++++++++++-- .../tidbcluster/tidb_cluster_controller.go | 11 +++++++---- pkg/manager/member/failover.go | 5 +++++ pkg/manager/member/pd_failover.go | 4 ++-- pkg/manager/member/pd_member_manager.go | 9 +++++++-- pkg/manager/member/pd_scaler.go | 10 ++++++++-- pkg/manager/member/scaler.go | 4 ++++ pkg/manager/member/tidb_failover.go | 9 ++++++++- pkg/manager/member/tidb_member_manager.go | 7 ++++++- pkg/manager/member/tikv_failover.go | 10 ++++++++-- pkg/manager/member/tikv_member_manager.go | 7 ++++++- pkg/manager/member/tikv_scaler.go | 10 ++++++++-- pkg/manager/member/upgrader.go | 5 +++++ 13 files changed, 87 insertions(+), 19 deletions(-) diff --git a/pkg/autoscaler/autoscaler/autoscaler_manager.go b/pkg/autoscaler/autoscaler/autoscaler_manager.go index 3bbc7c7454..550f6ead9a 100644 --- a/pkg/autoscaler/autoscaler/autoscaler_manager.go +++ b/pkg/autoscaler/autoscaler/autoscaler_manager.go @@ -15,12 +15,14 @@ package autoscaler import ( "fmt" + "strings" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -80,7 +82,7 @@ func (am *autoScalerManager) Sync(tac *v1alpha1.TidbClusterAutoScaler) error { if err := am.syncAutoScaling(tc, tac); err != nil { return err } - if err := am.syncTidbClusterReplicas(tc, oldTc); err != nil { + if err := am.syncTidbClusterReplicas(tac, tc, oldTc); err != nil { return err } return am.syncAutoScalingStatus(tc, oldTc, tac) @@ -102,10 +104,19 @@ func (am *autoScalerManager) syncAutoScaling(tc *v1alpha1.TidbCluster, tac *v1al return nil } -func (am *autoScalerManager) syncTidbClusterReplicas(tc *v1alpha1.TidbCluster, oldTc *v1alpha1.TidbCluster) error { +func (am *autoScalerManager) syncTidbClusterReplicas(tac *v1alpha1.TidbClusterAutoScaler, tc *v1alpha1.TidbCluster, oldTc *v1alpha1.TidbCluster) error { if tc.Spec.TiDB.Replicas == oldTc.Spec.TiDB.Replicas && tc.Spec.TiKV.Replicas == oldTc.Spec.TiKV.Replicas { return nil } + reason := fmt.Sprintf("Successful %s", strings.Title("auto-scaling")) + msg := "" + if tc.Spec.TiDB.Replicas != oldTc.Spec.TiDB.Replicas { + msg = fmt.Sprintf("%s auto-scaling tidb from %d to %d", msg, oldTc.Spec.TiDB.Replicas, tc.Spec.TiDB.Replicas) + } + if tc.Spec.TiKV.Replicas != oldTc.Spec.TiKV.Replicas { + msg = fmt.Sprintf("%s auto-scaling tikv from %d to %d", msg, oldTc.Spec.TiKV.Replicas, tc.Spec.TiKV.Replicas) + } + am.recorder.Event(tac, corev1.EventTypeNormal, reason, msg) newTc := tc.DeepCopy() _, err := am.tcControl.UpdateTidbCluster(newTc, &newTc.Status, &oldTc.Status) if err != nil { diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index dba0a9e7bb..bf86409d74 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -104,11 +104,11 @@ func NewController( 
secControl := controller.NewRealSecretControl(kubeCli) certControl := controller.NewRealCertControl(kubeCli, csrInformer.Lister(), secControl) typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder)) - pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl) - tikvScaler := mm.NewTiKVScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister()) + pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl, recorder) + tikvScaler := mm.NewTiKVScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister(), recorder) pdFailover := mm.NewPDFailover(cli, pdControl, pdFailoverPeriod, podInformer.Lister(), podControl, pvcInformer.Lister(), pvcControl, pvInformer.Lister(), recorder) - tikvFailover := mm.NewTiKVFailover(tikvFailoverPeriod) - tidbFailover := mm.NewTiDBFailover(tidbFailoverPeriod) + tikvFailover := mm.NewTiKVFailover(tikvFailoverPeriod, recorder) + tidbFailover := mm.NewTiDBFailover(tidbFailoverPeriod, recorder) pdUpgrader := mm.NewPDUpgrader(pdControl, podControl, podInformer.Lister()) tikvUpgrader := mm.NewTiKVUpgrader(pdControl, podControl, podInformer.Lister()) tidbUpgrader := mm.NewTiDBUpgrader(tidbControl, podInformer.Lister()) @@ -135,6 +135,7 @@ func NewController( pdUpgrader, autoFailover, pdFailover, + recorder, ), mm.NewTiKVMemberManager( pdControl, @@ -150,6 +151,7 @@ func NewController( tikvFailover, tikvScaler, tikvUpgrader, + recorder, ), mm.NewTiDBMemberManager( setControl, @@ -163,6 +165,7 @@ func NewController( tidbUpgrader, autoFailover, tidbFailover, + recorder, ), meta.NewReclaimPolicyManager( pvcInformer.Lister(), diff --git a/pkg/manager/member/failover.go b/pkg/manager/member/failover.go index 7744ee7a80..d86a77b48d 100644 --- a/pkg/manager/member/failover.go +++ b/pkg/manager/member/failover.go @@ -15,6 +15,11 @@ package member import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" +const ( + failoverEventReason = "Failover" + failoverEventMsgPattern = "%s is during failover due to %s makred as failureMember, msg:%v" +) + // Failover implements the logic for pd/tikv/tidb's failover and recovery. 
type Failover interface { Failover(*v1alpha1.TidbCluster) error diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go index 0d668eea23..4d5cde3858 100644 --- a/pkg/manager/member/pd_failover.go +++ b/pkg/manager/member/pd_failover.go @@ -140,8 +140,8 @@ func (pf *pdFailover) tryToMarkAPeerAsFailure(tc *v1alpha1.TidbCluster) error { return err } - pf.recorder.Eventf(tc, apiv1.EventTypeWarning, "PDMemberMarkedAsFailure", - "%s(%s) marked as a failure member", podName, pdMember.ID) + msg := fmt.Sprintf("pd member[%s] is unhealthy", pdMember.ID) + pf.recorder.Event(tc, apiv1.EventTypeWarning, failoverEventReason, fmt.Sprintf(failoverEventMsgPattern, "pd", podName, msg)) tc.Status.PD.FailureMembers[podName] = v1alpha1.PDFailureMember{ PodName: podName, diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index a316d3b53e..e32a21ed06 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -58,6 +59,7 @@ type pdMemberManager struct { pdUpgrader Upgrader autoFailover bool pdFailover Failover + recorder record.EventRecorder } // NewPDMemberManager returns a *pdMemberManager @@ -75,7 +77,8 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface, pdScaler Scaler, pdUpgrader Upgrader, autoFailover bool, - pdFailover Failover) manager.Manager { + pdFailover Failover, + recorder record.EventRecorder) manager.Manager { return &pdMemberManager{ pdControl, setControl, @@ -91,7 +94,8 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface, pdScaler, pdUpgrader, autoFailover, - pdFailover} + pdFailover, + recorder} } func (pmm *pdMemberManager) Sync(tc *v1alpha1.TidbCluster) error { @@ -245,6 +249,7 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu } if !templateEqual(newPDSet, oldPDSet) || tc.Status.PD.Phase == v1alpha1.UpgradePhase { + pmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, v1alpha1.PDMemberType.String())) if err := pmm.pdUpgrader.Upgrade(tc, oldPDSet, newPDSet); err != nil { return err } diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index 47c11f4f74..f795a3c3c7 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -23,7 +23,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -36,12 +38,16 @@ type pdScaler struct { // NewPDScaler returns a Scaler func NewPDScaler(pdControl pdapi.PDControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, - pvcControl controller.PVCControlInterface) Scaler { - return &pdScaler{generalScaler{pdControl, pvcLister, pvcControl}} + pvcControl controller.PVCControlInterface, + recorder record.EventRecorder) Scaler { + return &pdScaler{generalScaler{pdControl, pvcLister, pvcControl, recorder}} } func (psd *pdScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { scaling, _, _, _ := scaleOne(oldSet, newSet) + oldReplicas := *oldSet.Spec.Replicas + targetReplicas := *newSet.Spec.Replicas + psd.recorder.Event(tc, corev1.EventTypeNormal, 
scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "pd", oldReplicas, targetReplicas)) if scaling > 0 { return psd.ScaleOut(tc, oldSet, newSet) } else if scaling < 0 { diff --git a/pkg/manager/member/scaler.go b/pkg/manager/member/scaler.go index 14b9bcfe71..9321b2c477 100644 --- a/pkg/manager/member/scaler.go +++ b/pkg/manager/member/scaler.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/sets" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -33,6 +34,8 @@ const ( skipReasonScalerPVCNotFound = "scaler: pvc is not found" skipReasonScalerAnnIsNil = "scaler: pvc annotations is nil" skipReasonScalerAnnDeferDeletingIsEmpty = "scaler: pvc annotations defer deleting is empty" + scalingEventReason = "Scaling" + scalingEventMsgPattern = "%s is scaling from %d to %d" ) // Scaler implements the logic for scaling out or scaling in the cluster. @@ -51,6 +54,7 @@ type generalScaler struct { pdControl pdapi.PDControlInterface pvcLister corelisters.PersistentVolumeClaimLister pvcControl controller.PVCControlInterface + recorder record.EventRecorder } func (gs *generalScaler) deleteDeferDeletingPVC(tc *v1alpha1.TidbCluster, diff --git a/pkg/manager/member/tidb_failover.go b/pkg/manager/member/tidb_failover.go index fbd18dfb66..a4e4e2ba87 100644 --- a/pkg/manager/member/tidb_failover.go +++ b/pkg/manager/member/tidb_failover.go @@ -14,21 +14,26 @@ package member import ( + "fmt" "time" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) type tidbFailover struct { tidbFailoverPeriod time.Duration + recorder record.EventRecorder } // NewTiDBFailover returns a tidbFailover instance -func NewTiDBFailover(failoverPeriod time.Duration) Failover { +func NewTiDBFailover(failoverPeriod time.Duration, recorder record.EventRecorder) Failover { return &tidbFailover{ tidbFailoverPeriod: failoverPeriod, + recorder: recorder, } } @@ -59,6 +64,8 @@ func (tf *tidbFailover) Failover(tc *v1alpha1.TidbCluster) error { PodName: tidbMember.Name, CreatedAt: metav1.Now(), } + msg := fmt.Sprintf("tidb[%s] is unhealthy", tidbMember.Name) + tf.recorder.Event(tc, corev1.EventTypeWarning, failoverEventReason, fmt.Sprintf(failoverEventMsgPattern, "tidb", tidbMember.Name, msg)) break } } diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index fc74272f13..b66df9d9fd 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" "k8s.io/utils/pointer" ) @@ -68,6 +69,7 @@ type tidbMemberManager struct { autoFailover bool tidbFailover Failover tidbStatefulSetIsUpgradingFn func(corelisters.PodLister, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) + recorder record.EventRecorder } // NewTiDBMemberManager returns a *tidbMemberManager @@ -81,7 +83,8 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, podLister corelisters.PodLister, tidbUpgrader Upgrader, autoFailover bool, - tidbFailover Failover) manager.Manager { + tidbFailover Failover, + recorder record.EventRecorder) manager.Manager { return &tidbMemberManager{ setControl: setControl, svcControl: svcControl, @@ -95,6 +98,7 @@ func 
NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, autoFailover: autoFailover, tidbFailover: tidbFailover, tidbStatefulSetIsUpgradingFn: tidbStatefulSetIsUpgrading, + recorder: recorder, } } @@ -202,6 +206,7 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid } if !templateEqual(newTiDBSet, oldTiDBSet) || tc.Status.TiDB.Phase == v1alpha1.UpgradePhase { + tmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, "tidb")) if err := tmm.tidbUpgrader.Upgrade(tc, oldTiDBSet, newTiDBSet); err != nil { return err } diff --git a/pkg/manager/member/tikv_failover.go b/pkg/manager/member/tikv_failover.go index 59085b3640..44981d6a40 100644 --- a/pkg/manager/member/tikv_failover.go +++ b/pkg/manager/member/tikv_failover.go @@ -14,20 +14,24 @@ package member import ( + "fmt" "time" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) type tikvFailover struct { tikvFailoverPeriod time.Duration + recorder record.EventRecorder } // NewTiKVFailover returns a tikv Failover -func NewTiKVFailover(tikvFailoverPeriod time.Duration) Failover { - return &tikvFailover{tikvFailoverPeriod} +func NewTiKVFailover(tikvFailoverPeriod time.Duration, recorder record.EventRecorder) Failover { + return &tikvFailover{tikvFailoverPeriod, recorder} } func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { @@ -62,6 +66,8 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { StoreID: store.ID, CreatedAt: metav1.Now(), } + msg := fmt.Sprintf("store[%s] is Down", store.ID) + tf.recorder.Event(tc, corev1.EventTypeWarning, failoverEventReason, fmt.Sprintf(failoverEventMsgPattern, "tikv", podName, msg)) } } } diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 39d1146729..5f17c54799 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -62,6 +63,7 @@ type tikvMemberManager struct { tikvScaler Scaler tikvUpgrader Upgrader tikvStatefulSetIsUpgradingFn func(corelisters.PodLister, pdapi.PDControlInterface, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) + recorder record.EventRecorder } // NewTiKVMemberManager returns a *tikvMemberManager @@ -78,7 +80,8 @@ func NewTiKVMemberManager( autoFailover bool, tikvFailover Failover, tikvScaler Scaler, - tikvUpgrader Upgrader) manager.Manager { + tikvUpgrader Upgrader, + recorder record.EventRecorder) manager.Manager { kvmm := tikvMemberManager{ pdControl: pdControl, podLister: podLister, @@ -93,6 +96,7 @@ func NewTiKVMemberManager( tikvFailover: tikvFailover, tikvScaler: tikvScaler, tikvUpgrader: tikvUpgrader, + recorder: recorder, } kvmm.tikvStatefulSetIsUpgradingFn = tikvStatefulSetIsUpgrading return &kvmm @@ -225,6 +229,7 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl } if !templateEqual(newSet, oldSet) || tc.Status.TiKV.Phase == v1alpha1.UpgradePhase { + tkmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, v1alpha1.PDMemberType.String())) if err := tkmm.tikvUpgrader.Upgrade(tc, oldSet, newSet); err != nil { return err } diff --git 
a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 5522dc68bc..01d70f5cf9 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -25,7 +25,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) @@ -39,12 +41,16 @@ type tikvScaler struct { func NewTiKVScaler(pdControl pdapi.PDControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, pvcControl controller.PVCControlInterface, - podLister corelisters.PodLister) Scaler { - return &tikvScaler{generalScaler{pdControl, pvcLister, pvcControl}, podLister} + podLister corelisters.PodLister, + recorder record.EventRecorder) Scaler { + return &tikvScaler{generalScaler{pdControl, pvcLister, pvcControl, recorder}, podLister} } func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { scaling, _, _, _ := scaleOne(oldSet, newSet) + oldReplicas := *oldSet.Spec.Replicas + targetReplicas := *newSet.Spec.Replicas + tsd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "tikv", oldReplicas, targetReplicas)) if scaling > 0 { return tsd.ScaleOut(tc, oldSet, newSet) } else if scaling < 0 { diff --git a/pkg/manager/member/upgrader.go b/pkg/manager/member/upgrader.go index 33f7a6a47d..9fb15cb137 100644 --- a/pkg/manager/member/upgrader.go +++ b/pkg/manager/member/upgrader.go @@ -18,6 +18,11 @@ import ( apps "k8s.io/api/apps/v1" ) +const ( + upgradingEventReason = "Upgrading" + upgradingEventMessagePattern = "%s is during upgrading" +) + // Upgrader implements the logic for upgrading the tidb cluster. 
type Upgrader interface { // Upgrade upgrade the cluster From 1fc957ee19235372af45edf3792307f7480f95ea Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Fri, 10 Apr 2020 16:58:32 +0800 Subject: [PATCH 02/11] fix unit test --- pkg/manager/member/pd_failover_test.go | 2 +- pkg/manager/member/pd_member_manager_test.go | 4 +++- pkg/manager/member/pd_scaler_test.go | 6 ++++-- pkg/manager/member/tidb_failover.go | 3 ++- pkg/manager/member/tidb_failover_test.go | 4 +++- pkg/manager/member/tidb_member_manager_test.go | 3 +++ pkg/manager/member/tikv_failover_test.go | 4 +++- pkg/manager/member/tikv_scaler_test.go | 5 +++-- 8 files changed, 22 insertions(+), 9 deletions(-) diff --git a/pkg/manager/member/pd_failover_test.go b/pkg/manager/member/pd_failover_test.go index cd9ca847d1..5da10129eb 100644 --- a/pkg/manager/member/pd_failover_test.go +++ b/pkg/manager/member/pd_failover_test.go @@ -275,7 +275,7 @@ func TestPDFailoverFailover(t *testing.T) { events := collectEvents(recorder.Events) g.Expect(events).To(HaveLen(2)) g.Expect(events[0]).To(ContainSubstring("test-pd-1(12891273174085095651) is unhealthy")) - g.Expect(events[1]).To(ContainSubstring("test-pd-1(12891273174085095651) marked as a failure member")) + g.Expect(events[1]).To(ContainSubstring("Failover pd is during failover due to test-pd-1 makred as failureMember, msg:pd member[12891273174085095651] is unhealthy")) }, }, { diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index b2d5dc46f7..60ceb5d572 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -38,6 +38,7 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" ) @@ -758,7 +759,7 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pdFailover := NewFakePDFailover() pdUpgrader := NewFakePDUpgrader() genericControll := controller.NewFakeGenericControl() - + recorder := record.NewFakeRecorder(100) return &pdMemberManager{ pdControl, setControl, @@ -775,6 +776,7 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pdUpgrader, autoFailover, pdFailover, + recorder, }, setControl, svcControl, pdControl, podInformer.Informer().GetIndexer(), pvcInformer.Informer().GetIndexer(), podControl } diff --git a/pkg/manager/member/pd_scaler_test.go b/pkg/manager/member/pd_scaler_test.go index 39b4ca7f6e..f89d175d00 100644 --- a/pkg/manager/member/pd_scaler_test.go +++ b/pkg/manager/member/pd_scaler_test.go @@ -15,11 +15,11 @@ package member import ( "fmt" - "github.com/pingcap/kvproto/pkg/pdpb" "testing" "time" . 
"github.com/onsi/gomega" + "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" @@ -31,6 +31,7 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" ) func TestPDScalerScaleOut(t *testing.T) { @@ -389,8 +390,9 @@ func newFakePDScaler() (*pdScaler, *pdapi.FakePDControl, cache.Indexer, *control pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() pdControl := pdapi.NewFakePDControl(kubeCli) pvcControl := controller.NewFakePVCControl(pvcInformer) + recorder := record.NewFakeRecorder(100) - return &pdScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl}}, + return &pdScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl, recorder}}, pdControl, pvcInformer.Informer().GetIndexer(), pvcControl } diff --git a/pkg/manager/member/tidb_failover.go b/pkg/manager/member/tidb_failover.go index a4e4e2ba87..1707558fbe 100644 --- a/pkg/manager/member/tidb_failover.go +++ b/pkg/manager/member/tidb_failover.go @@ -78,7 +78,8 @@ func (tf *tidbFailover) Recover(tc *v1alpha1.TidbCluster) { tc.Status.TiDB.FailureMembers = nil } -type fakeTiDBFailover struct{} +type fakeTiDBFailover struct { +} // NewFakeTiDBFailover returns a fake Failover func NewFakeTiDBFailover() Failover { diff --git a/pkg/manager/member/tidb_failover_test.go b/pkg/manager/member/tidb_failover_test.go index 4f14218be3..1b15420fa6 100644 --- a/pkg/manager/member/tidb_failover_test.go +++ b/pkg/manager/member/tidb_failover_test.go @@ -19,6 +19,7 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -382,7 +383,8 @@ func TestFakeTiDBFailoverRecover(t *testing.T) { } func newTiDBFailover() Failover { - return &tidbFailover{tidbFailoverPeriod: time.Duration(5 * time.Minute)} + recorder := record.NewFakeRecorder(100) + return &tidbFailover{tidbFailoverPeriod: time.Duration(5 * time.Minute), recorder: recorder} } func newTidbClusterForTiDBFailover() *v1alpha1.TidbCluster { diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index ba01f3db38..edfa16be0e 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -38,6 +38,7 @@ import ( kubefake "k8s.io/client-go/kubernetes/fake" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" ) @@ -778,6 +779,7 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet tidbUpgrader := NewFakeTiDBUpgrader() tidbFailover := NewFakeTiDBFailover() tidbControl := controller.NewFakeTiDBControl() + recorder := record.NewFakeRecorder(100) tmm := &tidbMemberManager{ setControl, @@ -793,6 +795,7 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet true, tidbFailover, tidbStatefulSetIsUpgrading, + recorder, } indexers := &fakeIndexers{ pod: podInformer.Informer().GetIndexer(), diff --git a/pkg/manager/member/tikv_failover_test.go b/pkg/manager/member/tikv_failover_test.go index adce3184a1..477d698a97 100644 --- a/pkg/manager/member/tikv_failover_test.go +++ b/pkg/manager/member/tikv_failover_test.go @@ -20,6 +20,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" ) @@ -304,5 +305,6 @@ func TestTiKVFailoverFailover(t *testing.T) { } func newFakeTiKVFailover() *tikvFailover { - return &tikvFailover{1 * time.Hour} + recorder := record.NewFakeRecorder(100) + return &tikvFailover{1 * time.Hour, recorder} } diff --git a/pkg/manager/member/tikv_scaler_test.go b/pkg/manager/member/tikv_scaler_test.go index a86ea8df6b..8e00b504aa 100644 --- a/pkg/manager/member/tikv_scaler_test.go +++ b/pkg/manager/member/tikv_scaler_test.go @@ -29,6 +29,7 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" ) func TestTiKVScalerScaleOut(t *testing.T) { @@ -435,8 +436,8 @@ func newFakeTiKVScaler() (*tikvScaler, *pdapi.FakePDControl, cache.Indexer, cach podInformer := kubeInformerFactory.Core().V1().Pods() pdControl := pdapi.NewFakePDControl(kubeCli) pvcControl := controller.NewFakePVCControl(pvcInformer) - - return &tikvScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl}, podInformer.Lister()}, + recorder := record.NewFakeRecorder(100) + return &tikvScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl, recorder}, podInformer.Lister()}, pdControl, pvcInformer.Informer().GetIndexer(), podInformer.Informer().GetIndexer(), pvcControl } From 4d535d4324936a224d960b7bc0bee253079c543f Mon Sep 17 00:00:00 2001 From: Song Gao Date: Mon, 13 Apr 2020 16:46:57 +0800 Subject: [PATCH 03/11] Update pkg/manager/member/upgrader.go Co-Authored-By: DanielZhangQD <36026334+DanielZhangQD@users.noreply.github.com> --- pkg/manager/member/upgrader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/manager/member/upgrader.go b/pkg/manager/member/upgrader.go index 9fb15cb137..9b79bec5a5 100644 --- a/pkg/manager/member/upgrader.go +++ b/pkg/manager/member/upgrader.go @@ -20,7 +20,7 @@ import ( const ( upgradingEventReason = "Upgrading" - upgradingEventMessagePattern = "%s is during upgrading" + upgradingEventMessagePattern = "%s is upgrading" ) // Upgrader implements the logic for upgrading the tidb cluster. 
From 7ddda196558a0400bd9592c907d3d066d1445031 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 16:54:49 +0800 Subject: [PATCH 04/11] revise scaling logic --- pkg/manager/member/pd_scaler.go | 19 ++++++++++++++----- pkg/manager/member/tikv_scaler.go | 19 ++++++++++++++----- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index f795a3c3c7..6755f7c30f 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -47,11 +47,20 @@ func (psd *pdScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, n scaling, _, _, _ := scaleOne(oldSet, newSet) oldReplicas := *oldSet.Spec.Replicas targetReplicas := *newSet.Spec.Replicas - psd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "pd", oldReplicas, targetReplicas)) - if scaling > 0 { - return psd.ScaleOut(tc, oldSet, newSet) - } else if scaling < 0 { - return psd.ScaleIn(tc, oldSet, newSet) + if scaling != 0 { + if scaling > 0 { + err := psd.ScaleOut(tc, oldSet, newSet) + if err != nil { + return err + } + } else if scaling < 0 { + err := psd.ScaleIn(tc, oldSet, newSet) + if err != nil { + return err + } + } + psd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "pd", oldReplicas, targetReplicas)) + return nil } return psd.SyncAutoScalerAnn(tc, oldSet) } diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 01d70f5cf9..72b1afd7a8 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -50,11 +50,20 @@ func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, scaling, _, _, _ := scaleOne(oldSet, newSet) oldReplicas := *oldSet.Spec.Replicas targetReplicas := *newSet.Spec.Replicas - tsd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "tikv", oldReplicas, targetReplicas)) - if scaling > 0 { - return tsd.ScaleOut(tc, oldSet, newSet) - } else if scaling < 0 { - return tsd.ScaleIn(tc, oldSet, newSet) + if scaling != 0 { + if scaling > 0 { + err := tsd.ScaleOut(tc, oldSet, newSet) + if err != nil { + return err + } + } else if scaling < 0 { + err := tsd.ScaleIn(tc, oldSet, newSet) + if err != nil { + return err + } + } + tsd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "tikv", oldReplicas, targetReplicas)) + return nil } // we only sync auto scaler annotations when we are finishing syncing scaling return tsd.SyncAutoScalerAnn(tc, oldSet) From 912678989749e9b3954041463d0ccc95947bb39b Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 17:05:38 +0800 Subject: [PATCH 05/11] revise logic --- pkg/autoscaler/autoscaler/autoscaler_manager.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/autoscaler/autoscaler/autoscaler_manager.go b/pkg/autoscaler/autoscaler/autoscaler_manager.go index 550f6ead9a..16784cee1c 100644 --- a/pkg/autoscaler/autoscaler/autoscaler_manager.go +++ b/pkg/autoscaler/autoscaler/autoscaler_manager.go @@ -108,6 +108,11 @@ func (am *autoScalerManager) syncTidbClusterReplicas(tac *v1alpha1.TidbClusterAu if tc.Spec.TiDB.Replicas == oldTc.Spec.TiDB.Replicas && tc.Spec.TiKV.Replicas == oldTc.Spec.TiKV.Replicas { return nil } + newTc := tc.DeepCopy() + _, err := am.tcControl.UpdateTidbCluster(newTc, &newTc.Status, &oldTc.Status) + if err != nil { + 
return err + } reason := fmt.Sprintf("Successful %s", strings.Title("auto-scaling")) msg := "" if tc.Spec.TiDB.Replicas != oldTc.Spec.TiDB.Replicas { @@ -117,11 +122,6 @@ func (am *autoScalerManager) syncTidbClusterReplicas(tac *v1alpha1.TidbClusterAu msg = fmt.Sprintf("%s auto-scaling tikv from %d to %d", msg, oldTc.Spec.TiKV.Replicas, tc.Spec.TiKV.Replicas) } am.recorder.Event(tac, corev1.EventTypeNormal, reason, msg) - newTc := tc.DeepCopy() - _, err := am.tcControl.UpdateTidbCluster(newTc, &newTc.Status, &oldTc.Status) - if err != nil { - return err - } return nil } From c01242c72277557293cc919f03350d250fe034f9 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 17:18:30 +0800 Subject: [PATCH 06/11] fix failover event --- pkg/manager/member/failover.go | 4 ++-- pkg/manager/member/pd_failover.go | 2 +- pkg/manager/member/pd_failover_test.go | 2 +- pkg/manager/member/tidb_failover.go | 2 +- pkg/manager/member/tikv_failover.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/manager/member/failover.go b/pkg/manager/member/failover.go index d86a77b48d..7a71f1015e 100644 --- a/pkg/manager/member/failover.go +++ b/pkg/manager/member/failover.go @@ -16,8 +16,8 @@ package member import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" const ( - failoverEventReason = "Failover" - failoverEventMsgPattern = "%s is during failover due to %s makred as failureMember, msg:%v" + unHealthEventReason = "Unhealthy" + unHealthEventMsgPattern = "%s pod[%s] is unhealthy, msg:%s" ) // Failover implements the logic for pd/tikv/tidb's failover and recovery. diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go index 4d5cde3858..51526f1926 100644 --- a/pkg/manager/member/pd_failover.go +++ b/pkg/manager/member/pd_failover.go @@ -141,7 +141,7 @@ func (pf *pdFailover) tryToMarkAPeerAsFailure(tc *v1alpha1.TidbCluster) error { } msg := fmt.Sprintf("pd member[%s] is unhealthy", pdMember.ID) - pf.recorder.Event(tc, apiv1.EventTypeWarning, failoverEventReason, fmt.Sprintf(failoverEventMsgPattern, "pd", podName, msg)) + pf.recorder.Event(tc, apiv1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "pd", podName, msg)) tc.Status.PD.FailureMembers[podName] = v1alpha1.PDFailureMember{ PodName: podName, diff --git a/pkg/manager/member/pd_failover_test.go b/pkg/manager/member/pd_failover_test.go index 5da10129eb..bbb4696a7d 100644 --- a/pkg/manager/member/pd_failover_test.go +++ b/pkg/manager/member/pd_failover_test.go @@ -275,7 +275,7 @@ func TestPDFailoverFailover(t *testing.T) { events := collectEvents(recorder.Events) g.Expect(events).To(HaveLen(2)) g.Expect(events[0]).To(ContainSubstring("test-pd-1(12891273174085095651) is unhealthy")) - g.Expect(events[1]).To(ContainSubstring("Failover pd is during failover due to test-pd-1 makred as failureMember, msg:pd member[12891273174085095651] is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("Unhealthy pd pod[test-pd-1] is unhealthy, msg:pd member[12891273174085095651] is unhealthy")) }, }, { diff --git a/pkg/manager/member/tidb_failover.go b/pkg/manager/member/tidb_failover.go index 1707558fbe..17df4353ee 100644 --- a/pkg/manager/member/tidb_failover.go +++ b/pkg/manager/member/tidb_failover.go @@ -65,7 +65,7 @@ func (tf *tidbFailover) Failover(tc *v1alpha1.TidbCluster) error { CreatedAt: metav1.Now(), } msg := fmt.Sprintf("tidb[%s] is unhealthy", tidbMember.Name) - tf.recorder.Event(tc, corev1.EventTypeWarning, failoverEventReason, 
fmt.Sprintf(failoverEventMsgPattern, "tidb", tidbMember.Name, msg)) + tf.recorder.Event(tc, corev1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "tidb", tidbMember.Name, msg)) break } } diff --git a/pkg/manager/member/tikv_failover.go b/pkg/manager/member/tikv_failover.go index 44981d6a40..036659a471 100644 --- a/pkg/manager/member/tikv_failover.go +++ b/pkg/manager/member/tikv_failover.go @@ -67,7 +67,7 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { CreatedAt: metav1.Now(), } msg := fmt.Sprintf("store[%s] is Down", store.ID) - tf.recorder.Event(tc, corev1.EventTypeWarning, failoverEventReason, fmt.Sprintf(failoverEventMsgPattern, "tikv", podName, msg)) + tf.recorder.Event(tc, corev1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "tikv", podName, msg)) } } } From 8848f8eaaa294bb41b6f0118022ed4f3a44c645c Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 17:26:47 +0800 Subject: [PATCH 07/11] remove upgrading event --- pkg/manager/member/pd_member_manager.go | 1 - pkg/manager/member/tidb_member_manager.go | 1 - pkg/manager/member/tikv_member_manager.go | 1 - pkg/manager/member/upgrader.go | 5 ----- 4 files changed, 8 deletions(-) diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index e32a21ed06..18fc2f2ce4 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -249,7 +249,6 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu } if !templateEqual(newPDSet, oldPDSet) || tc.Status.PD.Phase == v1alpha1.UpgradePhase { - pmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, v1alpha1.PDMemberType.String())) if err := pmm.pdUpgrader.Upgrade(tc, oldPDSet, newPDSet); err != nil { return err } diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index b66df9d9fd..803705a5bd 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -206,7 +206,6 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid } if !templateEqual(newTiDBSet, oldTiDBSet) || tc.Status.TiDB.Phase == v1alpha1.UpgradePhase { - tmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, "tidb")) if err := tmm.tidbUpgrader.Upgrade(tc, oldTiDBSet, newTiDBSet); err != nil { return err } diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 5f17c54799..8472cc09b1 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -229,7 +229,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl } if !templateEqual(newSet, oldSet) || tc.Status.TiKV.Phase == v1alpha1.UpgradePhase { - tkmm.recorder.Event(tc, corev1.EventTypeNormal, upgradingEventReason, fmt.Sprintf(upgradingEventMessagePattern, v1alpha1.PDMemberType.String())) if err := tkmm.tikvUpgrader.Upgrade(tc, oldSet, newSet); err != nil { return err } diff --git a/pkg/manager/member/upgrader.go b/pkg/manager/member/upgrader.go index 9b79bec5a5..33f7a6a47d 100644 --- a/pkg/manager/member/upgrader.go +++ b/pkg/manager/member/upgrader.go @@ -18,11 +18,6 @@ import ( apps "k8s.io/api/apps/v1" ) -const ( - upgradingEventReason = "Upgrading" - upgradingEventMessagePattern = "%s is upgrading" -) - // Upgrader 
implements the logic for upgrading the tidb cluster. type Upgrader interface { // Upgrade upgrade the cluster From 1da76c08ddbc4ca2471508f463ae9f3efc3a22d9 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 21:10:33 +0800 Subject: [PATCH 08/11] remove scaling event --- pkg/manager/member/pd_scaler.go | 21 ++++----------------- pkg/manager/member/scaler.go | 2 -- pkg/manager/member/tikv_scaler.go | 21 ++++----------------- 3 files changed, 8 insertions(+), 36 deletions(-) diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index 6755f7c30f..20448ba366 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "k8s.io/klog" @@ -45,22 +44,10 @@ func NewPDScaler(pdControl pdapi.PDControlInterface, func (psd *pdScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { scaling, _, _, _ := scaleOne(oldSet, newSet) - oldReplicas := *oldSet.Spec.Replicas - targetReplicas := *newSet.Spec.Replicas - if scaling != 0 { - if scaling > 0 { - err := psd.ScaleOut(tc, oldSet, newSet) - if err != nil { - return err - } - } else if scaling < 0 { - err := psd.ScaleIn(tc, oldSet, newSet) - if err != nil { - return err - } - } - psd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "pd", oldReplicas, targetReplicas)) - return nil + if scaling > 0 { + return psd.ScaleOut(tc, oldSet, newSet) + } else if scaling < 0 { + return psd.ScaleIn(tc, oldSet, newSet) } return psd.SyncAutoScalerAnn(tc, oldSet) } diff --git a/pkg/manager/member/scaler.go b/pkg/manager/member/scaler.go index 9321b2c477..dda1935c0c 100644 --- a/pkg/manager/member/scaler.go +++ b/pkg/manager/member/scaler.go @@ -34,8 +34,6 @@ const ( skipReasonScalerPVCNotFound = "scaler: pvc is not found" skipReasonScalerAnnIsNil = "scaler: pvc annotations is nil" skipReasonScalerAnnDeferDeletingIsEmpty = "scaler: pvc annotations defer deleting is empty" - scalingEventReason = "Scaling" - scalingEventMsgPattern = "%s is scaling from %d to %d" ) // Scaler implements the logic for scaling out or scaling in the cluster. 
diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 72b1afd7a8..2527fdf2cf 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "k8s.io/klog" @@ -48,22 +47,10 @@ func NewTiKVScaler(pdControl pdapi.PDControlInterface, func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { scaling, _, _, _ := scaleOne(oldSet, newSet) - oldReplicas := *oldSet.Spec.Replicas - targetReplicas := *newSet.Spec.Replicas - if scaling != 0 { - if scaling > 0 { - err := tsd.ScaleOut(tc, oldSet, newSet) - if err != nil { - return err - } - } else if scaling < 0 { - err := tsd.ScaleIn(tc, oldSet, newSet) - if err != nil { - return err - } - } - tsd.recorder.Event(tc, corev1.EventTypeNormal, scalingEventReason, fmt.Sprintf(scalingEventMsgPattern, "tikv", oldReplicas, targetReplicas)) - return nil + if scaling > 0 { + return tsd.ScaleOut(tc, oldSet, newSet) + } else if scaling < 0 { + return tsd.ScaleIn(tc, oldSet, newSet) } // we only sync auto scaler annotations when we are finishing syncing scaling return tsd.SyncAutoScalerAnn(tc, oldSet) From ba7b7521ec63495f2a600a6a28a401979ef4c004 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 21:13:29 +0800 Subject: [PATCH 09/11] remove unnecessary event --- pkg/controller/tidbcluster/tidb_cluster_controller.go | 4 ++-- pkg/manager/member/pd_scaler.go | 6 ++---- pkg/manager/member/pd_scaler_test.go | 4 +--- pkg/manager/member/scaler.go | 2 -- pkg/manager/member/tikv_scaler.go | 6 ++---- pkg/manager/member/tikv_scaler_test.go | 4 +--- 6 files changed, 8 insertions(+), 18 deletions(-) diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index bf86409d74..18a5476119 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -104,8 +104,8 @@ func NewController( secControl := controller.NewRealSecretControl(kubeCli) certControl := controller.NewRealCertControl(kubeCli, csrInformer.Lister(), secControl) typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder)) - pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl, recorder) - tikvScaler := mm.NewTiKVScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister(), recorder) + pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl) + tikvScaler := mm.NewTiKVScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister()) pdFailover := mm.NewPDFailover(cli, pdControl, pdFailoverPeriod, podInformer.Lister(), podControl, pvcInformer.Lister(), pvcControl, pvInformer.Lister(), recorder) tikvFailover := mm.NewTiKVFailover(tikvFailoverPeriod, recorder) tidbFailover := mm.NewTiDBFailover(tidbFailoverPeriod, recorder) diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index 20448ba366..47c11f4f74 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -37,9 
+36,8 @@ type pdScaler struct { // NewPDScaler returns a Scaler func NewPDScaler(pdControl pdapi.PDControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, - pvcControl controller.PVCControlInterface, - recorder record.EventRecorder) Scaler { - return &pdScaler{generalScaler{pdControl, pvcLister, pvcControl, recorder}} + pvcControl controller.PVCControlInterface) Scaler { + return &pdScaler{generalScaler{pdControl, pvcLister, pvcControl}} } func (psd *pdScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { diff --git a/pkg/manager/member/pd_scaler_test.go b/pkg/manager/member/pd_scaler_test.go index f89d175d00..ecb4c90788 100644 --- a/pkg/manager/member/pd_scaler_test.go +++ b/pkg/manager/member/pd_scaler_test.go @@ -31,7 +31,6 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" ) func TestPDScalerScaleOut(t *testing.T) { @@ -390,9 +389,8 @@ func newFakePDScaler() (*pdScaler, *pdapi.FakePDControl, cache.Indexer, *control pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() pdControl := pdapi.NewFakePDControl(kubeCli) pvcControl := controller.NewFakePVCControl(pvcInformer) - recorder := record.NewFakeRecorder(100) - return &pdScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl, recorder}}, + return &pdScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl}}, pdControl, pvcInformer.Informer().GetIndexer(), pvcControl } diff --git a/pkg/manager/member/scaler.go b/pkg/manager/member/scaler.go index dda1935c0c..14b9bcfe71 100644 --- a/pkg/manager/member/scaler.go +++ b/pkg/manager/member/scaler.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/sets" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -52,7 +51,6 @@ type generalScaler struct { pdControl pdapi.PDControlInterface pvcLister corelisters.PersistentVolumeClaimLister pvcControl controller.PVCControlInterface - recorder record.EventRecorder } func (gs *generalScaler) deleteDeferDeletingPVC(tc *v1alpha1.TidbCluster, diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 2527fdf2cf..5522dc68bc 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) @@ -40,9 +39,8 @@ type tikvScaler struct { func NewTiKVScaler(pdControl pdapi.PDControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, pvcControl controller.PVCControlInterface, - podLister corelisters.PodLister, - recorder record.EventRecorder) Scaler { - return &tikvScaler{generalScaler{pdControl, pvcLister, pvcControl, recorder}, podLister} + podLister corelisters.PodLister) Scaler { + return &tikvScaler{generalScaler{pdControl, pvcLister, pvcControl}, podLister} } func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { diff --git a/pkg/manager/member/tikv_scaler_test.go b/pkg/manager/member/tikv_scaler_test.go index 8e00b504aa..02b3921958 100644 --- a/pkg/manager/member/tikv_scaler_test.go +++ b/pkg/manager/member/tikv_scaler_test.go @@ -29,7 +29,6 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake 
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" ) func TestTiKVScalerScaleOut(t *testing.T) { @@ -436,8 +435,7 @@ func newFakeTiKVScaler() (*tikvScaler, *pdapi.FakePDControl, cache.Indexer, cach podInformer := kubeInformerFactory.Core().V1().Pods() pdControl := pdapi.NewFakePDControl(kubeCli) pvcControl := controller.NewFakePVCControl(pvcInformer) - recorder := record.NewFakeRecorder(100) - return &tikvScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl, recorder}, podInformer.Lister()}, + return &tikvScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl}, podInformer.Lister()}, pdControl, pvcInformer.Informer().GetIndexer(), podInformer.Informer().GetIndexer(), pvcControl } From 92866f0ecaa7d1663ab32d63da4c2581f582e87e Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 21:39:40 +0800 Subject: [PATCH 10/11] remove useless code --- pkg/controller/tidbcluster/tidb_cluster_controller.go | 3 --- pkg/manager/member/pd_member_manager.go | 8 ++------ pkg/manager/member/pd_member_manager_test.go | 3 --- pkg/manager/member/tidb_member_manager.go | 6 +----- pkg/manager/member/tidb_member_manager_test.go | 3 --- pkg/manager/member/tikv_member_manager.go | 6 +----- 6 files changed, 4 insertions(+), 25 deletions(-) diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index 18a5476119..1c596271f3 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -135,7 +135,6 @@ func NewController( pdUpgrader, autoFailover, pdFailover, - recorder, ), mm.NewTiKVMemberManager( pdControl, @@ -151,7 +150,6 @@ func NewController( tikvFailover, tikvScaler, tikvUpgrader, - recorder, ), mm.NewTiDBMemberManager( setControl, @@ -165,7 +163,6 @@ func NewController( tidbUpgrader, autoFailover, tidbFailover, - recorder, ), meta.NewReclaimPolicyManager( pvcInformer.Lister(), diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index 18fc2f2ce4..a316d3b53e 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -33,7 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -59,7 +58,6 @@ type pdMemberManager struct { pdUpgrader Upgrader autoFailover bool pdFailover Failover - recorder record.EventRecorder } // NewPDMemberManager returns a *pdMemberManager @@ -77,8 +75,7 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface, pdScaler Scaler, pdUpgrader Upgrader, autoFailover bool, - pdFailover Failover, - recorder record.EventRecorder) manager.Manager { + pdFailover Failover) manager.Manager { return &pdMemberManager{ pdControl, setControl, @@ -94,8 +91,7 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface, pdScaler, pdUpgrader, autoFailover, - pdFailover, - recorder} + pdFailover} } func (pmm *pdMemberManager) Sync(tc *v1alpha1.TidbCluster) error { diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index 60ceb5d572..7ba32b8b44 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -38,7 +38,6 @@ import ( kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" - 
"k8s.io/client-go/tools/record" "k8s.io/utils/pointer" ) @@ -759,7 +758,6 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pdFailover := NewFakePDFailover() pdUpgrader := NewFakePDUpgrader() genericControll := controller.NewFakeGenericControl() - recorder := record.NewFakeRecorder(100) return &pdMemberManager{ pdControl, setControl, @@ -776,7 +774,6 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pdUpgrader, autoFailover, pdFailover, - recorder, }, setControl, svcControl, pdControl, podInformer.Informer().GetIndexer(), pvcInformer.Informer().GetIndexer(), podControl } diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index 803705a5bd..fc74272f13 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" "k8s.io/utils/pointer" ) @@ -69,7 +68,6 @@ type tidbMemberManager struct { autoFailover bool tidbFailover Failover tidbStatefulSetIsUpgradingFn func(corelisters.PodLister, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) - recorder record.EventRecorder } // NewTiDBMemberManager returns a *tidbMemberManager @@ -83,8 +81,7 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, podLister corelisters.PodLister, tidbUpgrader Upgrader, autoFailover bool, - tidbFailover Failover, - recorder record.EventRecorder) manager.Manager { + tidbFailover Failover) manager.Manager { return &tidbMemberManager{ setControl: setControl, svcControl: svcControl, @@ -98,7 +95,6 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, autoFailover: autoFailover, tidbFailover: tidbFailover, tidbStatefulSetIsUpgradingFn: tidbStatefulSetIsUpgrading, - recorder: recorder, } } diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index edfa16be0e..ba01f3db38 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -38,7 +38,6 @@ import ( kubefake "k8s.io/client-go/kubernetes/fake" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" ) @@ -779,7 +778,6 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet tidbUpgrader := NewFakeTiDBUpgrader() tidbFailover := NewFakeTiDBFailover() tidbControl := controller.NewFakeTiDBControl() - recorder := record.NewFakeRecorder(100) tmm := &tidbMemberManager{ setControl, @@ -795,7 +793,6 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet true, tidbFailover, tidbStatefulSetIsUpgrading, - recorder, } indexers := &fakeIndexers{ pod: podInformer.Informer().GetIndexer(), diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 8472cc09b1..39d1146729 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -35,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" "k8s.io/klog" ) @@ -63,7 +62,6 @@ type tikvMemberManager struct { tikvScaler Scaler tikvUpgrader Upgrader tikvStatefulSetIsUpgradingFn 
func(corelisters.PodLister, pdapi.PDControlInterface, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) - recorder record.EventRecorder } // NewTiKVMemberManager returns a *tikvMemberManager @@ -80,8 +78,7 @@ func NewTiKVMemberManager( autoFailover bool, tikvFailover Failover, tikvScaler Scaler, - tikvUpgrader Upgrader, - recorder record.EventRecorder) manager.Manager { + tikvUpgrader Upgrader) manager.Manager { kvmm := tikvMemberManager{ pdControl: pdControl, podLister: podLister, @@ -96,7 +93,6 @@ func NewTiKVMemberManager( tikvFailover: tikvFailover, tikvScaler: tikvScaler, tikvUpgrader: tikvUpgrader, - recorder: recorder, } kvmm.tikvStatefulSetIsUpgradingFn = tikvStatefulSetIsUpgrading return &kvmm From d9a2dac20f7a462b59a16674d76c4b1edb7b56d7 Mon Sep 17 00:00:00 2001 From: Song Gao <2695690803@qq.com> Date: Mon, 13 Apr 2020 21:41:44 +0800 Subject: [PATCH 11/11] revert changes --- pkg/manager/member/pd_member_manager_test.go | 1 + pkg/manager/member/tikv_scaler_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index 7ba32b8b44..b2d5dc46f7 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -758,6 +758,7 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pdFailover := NewFakePDFailover() pdUpgrader := NewFakePDUpgrader() genericControll := controller.NewFakeGenericControl() + return &pdMemberManager{ pdControl, setControl, diff --git a/pkg/manager/member/tikv_scaler_test.go b/pkg/manager/member/tikv_scaler_test.go index 02b3921958..a86ea8df6b 100644 --- a/pkg/manager/member/tikv_scaler_test.go +++ b/pkg/manager/member/tikv_scaler_test.go @@ -435,6 +435,7 @@ func newFakeTiKVScaler() (*tikvScaler, *pdapi.FakePDControl, cache.Indexer, cach podInformer := kubeInformerFactory.Core().V1().Pods() pdControl := pdapi.NewFakePDControl(kubeCli) pvcControl := controller.NewFakePVCControl(pvcInformer) + return &tikvScaler{generalScaler{pdControl, pvcInformer.Lister(), pvcControl}, podInformer.Lister()}, pdControl, pvcInformer.Informer().GetIndexer(), podInformer.Informer().GetIndexer(), pvcControl }
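
---

Reviewer note (not part of the patches above): the series wires a `record.EventRecorder` into the failover and auto-scaler components and then asserts on the emitted events with `record.NewFakeRecorder(100)` in the unit tests. Below is a minimal, self-contained sketch of how that wiring is typically verified — the FakeRecorder buffers each event as a formatted string on its `Events` channel, which is why PATCH 02/06 update the expected substrings rather than structured fields. The `drainEvents` helper and the bare `Unhealthy` message are illustrative assumptions; the operator's own tests use their `collectEvents` helper and pass the real `*v1alpha1.TidbCluster` object.

```go
// events_sketch_test.go — a hedged sketch, assuming it lives in the member
// package alongside the tests touched above.
package member

import (
	"fmt"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// drainEvents reads everything currently buffered by the FakeRecorder.
// (Hypothetical helper; the repository's tests use collectEvents instead.)
func drainEvents(ch chan string) []string {
	var out []string
	for {
		select {
		case e := <-ch:
			out = append(out, e)
		default:
			return out
		}
	}
}

func TestUnhealthyEventSketch(t *testing.T) {
	recorder := record.NewFakeRecorder(100)

	// Emit an event the same way tikvFailover.Failover does after marking a
	// store as a failure member. FakeRecorder ignores the object argument and
	// only formats "<type> <reason> <message>", so nil is acceptable here;
	// production code passes the TidbCluster being synced.
	msg := fmt.Sprintf("store[%s] is Down", "1")
	recorder.Event(nil, corev1.EventTypeWarning, "Unhealthy",
		fmt.Sprintf("%s pod[%s] is unhealthy, msg:%s", "tikv", "test-tikv-1", msg))

	events := drainEvents(recorder.Events)
	want := "Warning Unhealthy tikv pod[test-tikv-1] is unhealthy, msg:store[1] is Down"
	if len(events) != 1 || events[0] != want {
		t.Fatalf("unexpected events: %v", events)
	}
}
```

Because the recorder only needs the object for attaching the event to the right resource, injecting it through the constructors (as PATCH 01 does for the failover and scaler types) keeps the components testable without a running API server.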