Remove consecutive check #1732

Merged · 3 commits · Feb 20, 2020
3 changes: 1 addition & 2 deletions pkg/autoscaler/autoscaler/autoscaler_manager.go
@@ -60,8 +60,7 @@ func (am *autoScalerManager) Sync(tac *v1alpha1.TidbClusterAutoScaler) error {
 	if err != nil {
 		if errors.IsNotFound(err) {
 			// Target TidbCluster Ref is deleted, empty the auto-scaling status
-			emptyAutoScalingCountAnn(tac, v1alpha1.TiDBMemberType)
-			emptyAutoScalingCountAnn(tac, v1alpha1.TiKVMemberType)
+			resetAutoScalingAnn(tac)
 		}
 		return err
 	}
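The PR replaces the two per-member emptyAutoScalingCountAnn calls with a single resetAutoScalingAnn helper, whose body is not shown in this diff. A minimal sketch of what such a helper could look like, assuming it simply clears the per-member auto-scaling annotations that these files maintain (the annotation keys are the ones used elsewhere in this PR; the real implementation may do more):

```go
package autoscaler

import (
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/label"
)

// resetAutoScalingAnn (sketch): drop the auto-scaling bookkeeping annotations
// for both member types in one call, instead of one call per member type.
func resetAutoScalingAnn(tac *v1alpha1.TidbClusterAutoScaler) {
	if tac.Annotations == nil {
		return
	}
	delete(tac.Annotations, label.AnnTiDBLastAutoScalingTimestamp)
	delete(tac.Annotations, label.AnnTiKVLastAutoScalingTimestamp)
}
```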
17 changes: 1 addition & 16 deletions pkg/autoscaler/autoscaler/tidb_autoscaler.go
@@ -27,15 +27,13 @@ import (

 func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
 	if tac.Spec.TiDB == nil {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiDBMemberType)
 		return nil
 	}
 	sts, err := am.stsLister.StatefulSets(tc.Namespace).Get(operatorUtils.GetStatefulSetName(tc, v1alpha1.TiDBMemberType))
 	if err != nil {
 		return err
 	}
 	if !checkAutoScalingPrerequisites(tc, sts, v1alpha1.TiDBMemberType) {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiDBMemberType)
 		return nil
 	}
 	currentReplicas := tc.Spec.TiDB.Replicas
@@ -46,7 +44,6 @@ func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
 	}
 	targetReplicas = limitTargetReplicas(targetReplicas, tac, v1alpha1.TiDBMemberType)
 	if targetReplicas == tc.Spec.TiDB.Replicas {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiDBMemberType)
 		return nil
 	}
 	return syncTiDBAfterCalculated(tc, tac, currentReplicas, targetReplicas)
@@ -56,22 +53,11 @@ func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
 // duration between each auto-scaling. If either of them is not met, the auto-scaling would be rejected.
 // If the auto-scaling is permitted, the timestamp would be recorded and the Consecutive count would be zeroed.
 func syncTiDBAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32) error {
-	if err := updateConsecutiveCount(tac, v1alpha1.TiDBMemberType, currentReplicas, recommendedReplicas); err != nil {
-		return err
-	}
-
-	ableToScale, err := checkConsecutiveCount(tac, v1alpha1.TiDBMemberType, currentReplicas, recommendedReplicas)
-	if err != nil {
-		return err
-	}
-	if !ableToScale {
-		return nil
-	}
 	intervalSeconds := tac.Spec.TiDB.ScaleInIntervalSeconds
 	if recommendedReplicas > currentReplicas {
 		intervalSeconds = tac.Spec.TiDB.ScaleOutIntervalSeconds
 	}
-	ableToScale, err = checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiDBMemberType)
+	ableToScale, err := checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiDBMemberType)
 	if err != nil {
 		return err
 	}

Review thread on the removed updateConsecutiveCount call:

Contributor: Can we just store the count in memory? During a container restart, just re-count them; there is no need for it to be persistent now.

Contributor Author: This remains to be discussed.
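The reviewer's suggestion above, keeping the consecutive count in process memory instead of persisting it in annotations, could look something like this sketch (entirely hypothetical; none of these names exist in the PR). A container restart simply loses the counts and re-accumulates them, as the reviewer proposes:

```go
package autoscaler

import "sync"

// consecutiveCounter is a hypothetical in-memory replacement for the
// annotation-based consecutive count removed by this PR. Counts live only in
// the controller process, so a restart starts them from zero again.
type consecutiveCounter struct {
	mu     sync.Mutex
	counts map[string]int32 // key: "<namespace>/<name>/<memberType>"
}

func newConsecutiveCounter() *consecutiveCounter {
	return &consecutiveCounter{counts: map[string]int32{}}
}

// bump resets the count when the recommendation changes direction, then
// increments and returns it; the caller compares the result against the
// configured consecutive threshold before allowing a scale operation.
func (c *consecutiveCounter) bump(key string, sameDirection bool) int32 {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !sameDirection {
		c.counts[key] = 0
	}
	c.counts[key]++
	return c.counts[key]
}
```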
@@ -85,7 +71,6 @@ func syncTiDBAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32) error {

 func updateTcTiDBAnnIfScale(tac *v1alpha1.TidbClusterAutoScaler) {
 	tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp] = time.Now().String()
-	emptyAutoScalingCountAnn(tac, v1alpha1.TiDBMemberType)
 }

 func calculateTidbMetrics(tac *v1alpha1.TidbClusterAutoScaler, sts *appsv1.StatefulSet, client promClient.Client, instances []string) (int32, error) {
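updateTcTiDBAnnIfScale records the scaling moment via time.Now().String(), and checkStsAutoScalingInterval (its body is not in this diff) presumably compares that timestamp against the configured interval. A sketch under those assumptions; where the annotation is read from, and the timestamp format, are both guesses of this sketch rather than facts confirmed by the diff:

```go
package autoscaler

import (
	"time"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/label"
)

// checkStsAutoScalingInterval (sketch): permit another scaling operation only
// after intervalSeconds have elapsed since the last recorded one.
func checkStsAutoScalingInterval(tc *v1alpha1.TidbCluster, intervalSeconds int32, memberType v1alpha1.MemberType) (bool, error) {
	key := label.AnnTiDBLastAutoScalingTimestamp
	if memberType == v1alpha1.TiKVMemberType {
		key = label.AnnTiKVLastAutoScalingTimestamp
	}
	last, ok := tc.Annotations[key]
	if !ok {
		return true, nil // never auto-scaled before, no interval to respect
	}
	// Layout matching time.Time.String(), the format written by the code above.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", last)
	if err != nil {
		return false, err
	}
	return time.Since(t) >= time.Duration(intervalSeconds)*time.Second, nil
}
```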
115 changes: 0 additions & 115 deletions pkg/autoscaler/autoscaler/tidb_autoscaler_test.go

This file was deleted.

17 changes: 1 addition & 16 deletions pkg/autoscaler/autoscaler/tikv_autoscaler.go
@@ -27,15 +27,13 @@ import (

 func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
 	if tac.Spec.TiKV == nil {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiKVMemberType)
 		return nil
 	}
 	sts, err := am.stsLister.StatefulSets(tc.Namespace).Get(operatorUtils.GetStatefulSetName(tc, v1alpha1.TiKVMemberType))
 	if err != nil {
 		return err
 	}
 	if !checkAutoScalingPrerequisites(tc, sts, v1alpha1.TiKVMemberType) {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiKVMemberType)
 		return nil
 	}
 	instances := filterTiKVInstances(tc)
@@ -46,7 +44,6 @@ func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
 	}
 	targetReplicas = limitTargetReplicas(targetReplicas, tac, v1alpha1.TiKVMemberType)
 	if targetReplicas == tc.Spec.TiKV.Replicas {
-		emptyAutoScalingCountAnn(tac, v1alpha1.TiKVMemberType)
 		return nil
 	}
 	return syncTiKVAfterCalculated(tc, tac, currentReplicas, targetReplicas)
Expand All @@ -58,23 +55,12 @@ func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.Ti
// The currentReplicas of TiKV calculated in auto-scaling is the count of the StateUp TiKV instance, so we need to
// add the number of other state tikv instance replicas when we update the TidbCluster.Spec.TiKV.Replicas
func syncTiKVAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32) error {
if err := updateConsecutiveCount(tac, v1alpha1.TiKVMemberType, currentReplicas, recommendedReplicas); err != nil {
return err
}

ableToScale, err := checkConsecutiveCount(tac, v1alpha1.TiKVMemberType, currentReplicas, recommendedReplicas)
if err != nil {
return err
}
if !ableToScale {
return nil
}

intervalSeconds := tac.Spec.TiKV.ScaleInIntervalSeconds
if recommendedReplicas > tc.Spec.TiKV.Replicas {
intervalSeconds = tac.Spec.TiKV.ScaleOutIntervalSeconds
}
ableToScale, err = checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiKVMemberType)
ableToScale, err := checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiKVMemberType)
if err != nil {
return err
}
@@ -99,7 +85,6 @@ func filterTiKVInstances(tc *v1alpha1.TidbCluster) []string {

 func updateTcTiKVAnnIfScale(tac *v1alpha1.TidbClusterAutoScaler) {
 	tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp] = time.Now().String()
-	emptyAutoScalingCountAnn(tac, v1alpha1.TiKVMemberType)
 }

 func calculateTikvMetrics(tac *v1alpha1.TidbClusterAutoScaler, sts *appsv1.StatefulSet, client promClient.Client, instances []string) (int32, error) {
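The doc comment on syncTiKVAfterCalculated explains that currentReplicas counts only StateUp TiKV stores, so stores in other states must be added back before writing TidbCluster.Spec.TiKV.Replicas. A sketch of that adjustment, using a hypothetical helper that is not part of this diff:

```go
package autoscaler

import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"

// targetSpecReplicas (hypothetical helper): the metrics-based recommendation
// covers only StateUp TiKV stores, so add the stores in other states back
// before writing the result into TidbCluster.Spec.TiKV.Replicas.
func targetSpecReplicas(tc *v1alpha1.TidbCluster, recommendedUpReplicas int32) int32 {
	var nonUp int32
	for _, store := range tc.Status.TiKV.Stores {
		if store.State != v1alpha1.TiKVStateUp {
			nonUp++
		}
	}
	return recommendedUpReplicas + nonUp
}
```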
114 changes: 0 additions & 114 deletions pkg/autoscaler/autoscaler/tikv_autoscaler_test.go

This file was deleted.
