forked from actions/actions-runner-controller
Commit
Update unconsumed HRA capacity reservation's expiration more frequently and consistently (actions#2502)
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
Showing 4 changed files with 313 additions and 83 deletions.
controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale_test.go (166 additions, 0 deletions)
@@ -0,0 +1,166 @@
package actionssummerwindnet

import (
	"context"
	"testing"
	"time"

	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
	"github.com/go-logr/logr"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestPlanBatchScale(t *testing.T) {
	s := &batchScaler{Log: logr.Discard()}

	var (
		expiry   = 10 * time.Second
		interval = 3 * time.Second

		t0 = time.Now()
		t1 = t0.Add(interval)
		t2 = t1.Add(interval)
	)

	check := func(t *testing.T, amount int, newExpiry time.Duration, wantReservations []v1alpha1.CapacityReservation) {
		t.Helper()

		var (
			op = batchScaleOperation{
				scaleOps: []scaleOperation{
					{
						log: logr.Discard(),
						trigger: v1alpha1.ScaleUpTrigger{
							Amount:   amount,
							Duration: metav1.Duration{Duration: newExpiry},
						},
					},
				},
			}

			hra = &v1alpha1.HorizontalRunnerAutoscaler{
				Spec: v1alpha1.HorizontalRunnerAutoscalerSpec{
					MaxReplicas: intPtr(1),
					ScaleUpTriggers: []v1alpha1.ScaleUpTrigger{
						{
							Amount:   1,
							Duration: metav1.Duration{Duration: newExpiry},
						},
					},
					CapacityReservations: []v1alpha1.CapacityReservation{
						{
							EffectiveTime:  metav1.NewTime(t0),
							ExpirationTime: metav1.NewTime(t0.Add(expiry)),
							Replicas:       1,
						},
						{
							EffectiveTime:  metav1.NewTime(t1),
							ExpirationTime: metav1.NewTime(t1.Add(expiry)),
							Replicas:       1,
						},
					},
				},
			}
		)

		want := hra.DeepCopy()

		want.Spec.CapacityReservations = wantReservations

		got, err := s.planBatchScale(context.Background(), op, hra, t2)

		require.NoError(t, err)
		require.Equal(t, want, got)
	}

	t.Run("scale up", func(t *testing.T) {
		check(t, 1, expiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas,
				// i.e. the corresponding runner is assumed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 because this reservation exceeded maxReplicas.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
			{
				// This is based on t2 (=now) because it has just been added.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale up reuses previous scale trigger duration for extension", func(t *testing.T) {
		newExpiry := expiry + time.Second
		check(t, 1, newExpiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas,
				// i.e. the corresponding runner is assumed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 because this reservation exceeded maxReplicas.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
			{
				// This is based on t2 (=now) because it has just been added.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(newExpiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale down", func(t *testing.T) {
		check(t, -1, expiry, []v1alpha1.CapacityReservation{
			{
				// Updated from t1 to t2 because this reservation exceeded maxReplicas.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	t.Run("scale down is not affected by new scale trigger duration", func(t *testing.T) {
		check(t, -1, expiry+time.Second, []v1alpha1.CapacityReservation{
			{
				// Updated from t1 to t2 because this reservation exceeded maxReplicas.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})

	// TODO: Keep refreshing the expiry date even when there are no other scale down/up triggers before the expiration.
	t.Run("extension", func(t *testing.T) {
		check(t, 0, expiry, []v1alpha1.CapacityReservation{
			{
				// This is kept based on t0 because it falls within maxReplicas,
				// i.e. the corresponding runner is assumed to be already deployed.
				EffectiveTime:  metav1.NewTime(t0),
				ExpirationTime: metav1.NewTime(t0.Add(expiry)),
				Replicas:       1,
			},
			{
				// Updated from t1 to t2 because this reservation exceeded maxReplicas.
				EffectiveTime:  metav1.NewTime(t2),
				ExpirationTime: metav1.NewTime(t2.Add(expiry)),
				Replicas:       1,
			},
		})
	})
}
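To see the rule these subtests assert in isolation: reservations that fit within maxReplicas keep their original effective/expiration times (their runners are assumed to be deployed already), while reservations beyond maxReplicas are still unconsumed and get re-based on "now". The following is a minimal standalone sketch of that rule only; the reservation struct and rebaseUnconsumed are invented for illustration and are not the actual planBatchScale implementation.

package main

import (
	"fmt"
	"time"
)

// reservation mirrors just the v1alpha1.CapacityReservation fields that matter here.
type reservation struct {
	effective  time.Time
	expiration time.Time
	replicas   int
}

// rebaseUnconsumed keeps reservations that fit within maxReplicas untouched and
// re-bases the effective/expiration times of the rest on "now", so an
// unconsumed reservation does not expire while it is still waiting for capacity.
func rebaseUnconsumed(rs []reservation, maxReplicas int, now time.Time, expiry time.Duration) []reservation {
	out := make([]reservation, 0, len(rs))
	used := 0
	for _, r := range rs {
		if used+r.replicas > maxReplicas {
			// Unconsumed: refresh both timestamps relative to now.
			r.effective = now
			r.expiration = now.Add(expiry)
		}
		used += r.replicas
		out = append(out, r)
	}
	return out
}

func main() {
	// Same shape as the test fixture: t0, t1 = t0+3s, now = t2 = t0+6s, expiry = 10s.
	expiry := 10 * time.Second
	t0 := time.Now()
	t1 := t0.Add(3 * time.Second)
	t2 := t1.Add(3 * time.Second)

	rs := rebaseUnconsumed([]reservation{
		{effective: t0, expiration: t0.Add(expiry), replicas: 1}, // kept: within maxReplicas
		{effective: t1, expiration: t1.Add(expiry), replicas: 1}, // re-based on t2
	}, 1, t2, expiry)

	for _, r := range rs {
		fmt.Println(r.effective, r.expiration)
	}
}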
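Assuming the repository's standard Go module layout, the new test can be run on its own with:

go test -run TestPlanBatchScale ./controllers/actions.summerwind.net/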