VPA: Add unit tests for pruning grace period and containersPerPod field
Signed-off-by: Max Cao <macao@redhat.com>
maxcao13 committed Jan 24, 2025
1 parent 24c4eba commit 9a40688
Showing 2 changed files with 218 additions and 1 deletion.
126 changes: 126 additions & 0 deletions vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
@@ -33,6 +33,7 @@ import (
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)

var (
@@ -453,6 +454,41 @@ func TestUpdatePodSelector(t *testing.T) {
assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(testContainerID))
}

// Creates a VPA without a pruning grace period annotation, along with a matching pod and container.
// Then adds a pruningGracePeriod annotation to the VPA such that the related containerAggregateState
// is now considered stale. Finally, updates the VPA to a long grace period to make the aggregate
// active again. Verifies that stale aggregates can be pruned by applying and removing the annotation.
func TestUpdatePruningGracePeriod(t *testing.T) {
shortGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "0"}
longGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "1000000"}
now := time.Now()

cluster := NewClusterState(testGcPeriod)
vpa := addTestVpa(cluster)
addTestPod(cluster)
addTestContainer(t, cluster)
testAggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)

// Make sure aggregate was created.
aggregateContainerState, aggregateStateExists := cluster.aggregateStateMap[testAggregateStateKey]
assert.True(t, aggregateStateExists)
assert.Contains(t, vpa.aggregateContainerStates, testAggregateStateKey)
// Check that the aggregate is not stale.
assert.False(t, aggregateContainerState.IsAggregateStale(now))

vpaObject := test.VerticalPodAutoscaler().
WithNamespace(testVpaID.Namespace).
WithName(testVpaID.VpaName).
WithContainer(testContainerID.ContainerName).
WithAnnotations(shortGracePeriodAnnotations).
WithTargetRef(testTargetRef).
Get()
addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
// Check that the aggregate is stale under the zero-length grace period.
assert.True(t, aggregateContainerState.IsAggregateStale(now))

vpaObject = test.VerticalPodAutoscaler().
WithNamespace(testVpaID.Namespace).
WithName(testVpaID.VpaName).
WithContainer(testContainerID.ContainerName).
WithAnnotations(longGracePeriodAnnotations).
WithTargetRef(testTargetRef).
Get()
addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
// Check that the aggregate is no longer stale under the long grace period.
assert.False(t, aggregateContainerState.IsAggregateStale(now))
}
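
For context, a minimal sketch of the staleness check this test drives. Hypothetical and simplified: the aggregateSketch type, its fields, and the seconds interpretation of the annotation value are all assumptions; the real IsAggregateStale implementation in the model package may differ.

package main

import (
	"fmt"
	"time"
)

type aggregateSketch struct {
	lastUpdateTime     time.Time
	pruningGracePeriod time.Duration // assumed to be parsed from the VPA annotation
}

// isStale reports whether the aggregate has gone longer than its grace
// period without an update; a zero grace period makes it stale immediately.
func (a aggregateSketch) isStale(now time.Time) bool {
	return now.Sub(a.lastUpdateTime) > a.pruningGracePeriod
}

func main() {
	now := time.Now()
	agg := aggregateSketch{lastUpdateTime: now.Add(-time.Minute)}
	fmt.Println(agg.isStale(now)) // true: zero grace period, mirrors the "0" annotation above
	agg.pruningGracePeriod = 1000000 * time.Second // long grace period; units are an assumption
	fmt.Println(agg.isStale(now)) // false: well within the grace period
}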

// Test setting ResourcePolicy and UpdatePolicy on adding or updating VPA object
func TestAddOrUpdateVPAPolicies(t *testing.T) {
testVpaBuilder := test.VerticalPodAutoscaler().WithName(testVpaID.VpaName).
@@ -943,3 +979,93 @@ func TestVPAWithMatchingPods(t *testing.T) {
})
}
}

func TestSetVPAContainersPerPod(t *testing.T) {
cases := []struct {
name string
containers []ContainerID
expectedContainersPerPod int
}{
{
name: "Single container",
containers: []ContainerID{
{testPodID, "container-1"},
},
expectedContainersPerPod: 1,
}, {
name: "Two containers",
containers: []ContainerID{
{testPodID, "container-1"},
{testPodID, "container-2"},
},
expectedContainersPerPod: 2,
}, {
name: "Three containers",
containers: []ContainerID{
{testPodID, "container-1"},
{testPodID, "container-2"},
{testPodID, "container-3"},
},
expectedContainersPerPod: 3,
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cluster := NewClusterState(testGcPeriod)
vpa := addTestVpa(cluster)
pod := addTestPod(cluster)
for _, container := range tc.containers {
assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
}
cluster.SetVPAContainersPerPod(pod, true)
assert.Equal(t, tc.expectedContainersPerPod, vpa.ContainersPerPod)
})
}
}

func TestSetVPAContainersPerPodIsHighWatermark(t *testing.T) {
cluster := NewClusterState(testGcPeriod)
vpa := addTestVpa(cluster)
pod := addTestPod(cluster)
containers := []ContainerID{
{testPodID, "container-1"},
{testPodID, "container-2"},
{testPodID, "container-3"},
}
expectedLen := len(containers)
for _, container := range containers {
assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
}
cluster.SetVPAContainersPerPod(pod, true)
assert.Equal(t, len(containers), vpa.ContainersPerPod)

// It's possible we're in the middle of a rollout and the new ReplicaSet's pods have only 2 containers.
// We should not lower the containers-per-pod count (a high watermark) in that case, so that
// recommendation splitting is preserved.
cluster.AddOrUpdatePod(testPodID3, testLabels, apiv1.PodRunning)
pod3 := cluster.Pods[testPodID3]
containers = []ContainerID{
{testPodID3, "container-1"},
{testPodID3, "container-2"},
}
for _, container := range containers {
assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
}
cluster.SetVPAContainersPerPod(pod3, true)
assert.Equal(t, expectedLen, vpa.ContainersPerPod)

// But if we add more than the high watermark, we should update it.
cluster.AddOrUpdatePod(testPodID4, testLabels, apiv1.PodRunning)
pod4 := cluster.Pods[testPodID4]
containers = []ContainerID{
{testPodID4, "container-1"},
{testPodID4, "container-2"},
{testPodID4, "container-3"},
{testPodID4, "container-4"},
}
for _, container := range containers {
assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
}
cluster.SetVPAContainersPerPod(pod4, true)
assert.Equal(t, len(containers), vpa.ContainersPerPod)
}
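
A sketch of the high-watermark rule these assertions pin down. Hypothetical and simplified: setContainersPerPod is a stand-in name, and the real SetVPAContainersPerPod in cluster.go also walks cluster and VPA state.

package main

import "fmt"

// setContainersPerPod only ever raises the stored count: pods observed
// mid-rollout with fewer containers must not shrink it, so recommendation
// splitting stays stable until a pod with more containers appears.
func setContainersPerPod(current, observed int) int {
	if observed > current {
		return observed
	}
	return current
}

func main() {
	count := setContainersPerPod(0, 3)    // first pod: 3 containers
	count = setContainersPerPod(count, 2) // mid-rollout pod with fewer: ignored
	count = setContainersPerPod(count, 4) // larger pod: raises the watermark
	fmt.Println(count)                    // 4
}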
93 changes: 92 additions & 1 deletion vertical-pod-autoscaler/pkg/recommender/model/vpa_test.go
@@ -31,7 +31,6 @@ import (

var (
anyTime = time.Unix(0, 0)
// TODO(maxcao13): write tests for the pruningGracePeriod field
)

func TestMergeAggregateContainerState(t *testing.T) {
@@ -435,6 +434,98 @@ func TestDeleteAggregation(t *testing.T) {
}
}

func TestDeleteAllAggregatesByContainerName(t *testing.T) {
cases := []struct {
name string
toDelete string
aggregateContainerStates aggregateContainerStatesMap
initialContainerAggregates ContainerNameToAggregateStateMap
expectedAggregatesLeft int
expectedInitialAggregatesLeft int
}{
{
name: "Deletes all aggregates since it's the only container name",
toDelete: "container",
aggregateContainerStates: aggregateContainerStatesMap{
aggregateStateKey{
namespace: "ns",
containerName: "container",
labelSetKey: "labelSetKey",
labelSetMap: nil,
}: &AggregateContainerState{},
aggregateStateKey{
namespace: "ns",
containerName: "container",
labelSetKey: "labelSetKey2",
labelSetMap: nil,
}: &AggregateContainerState{},
},
initialContainerAggregates: ContainerNameToAggregateStateMap{
"container": &AggregateContainerState{},
},
expectedAggregatesLeft: 0,
expectedInitialAggregatesLeft: 0,
},
{
name: "Deletes only the aggregates with the specified container name",
toDelete: "container",
aggregateContainerStates: aggregateContainerStatesMap{
aggregateStateKey{
namespace: "ns",
containerName: "container",
labelSetKey: "labelSetKey",
labelSetMap: nil,
}: &AggregateContainerState{},
aggregateStateKey{
namespace: "ns",
containerName: "container2",
labelSetKey: "labelSetKey2",
labelSetMap: nil,
}: &AggregateContainerState{},
},
initialContainerAggregates: ContainerNameToAggregateStateMap{
"container": &AggregateContainerState{},
"container2": &AggregateContainerState{},
},
expectedAggregatesLeft: 1,
expectedInitialAggregatesLeft: 1,
},
{
name: "Deletes none of the aggregates since the container name doesn't match",
toDelete: "nonexistent",
aggregateContainerStates: aggregateContainerStatesMap{
aggregateStateKey{
namespace: "ns",
containerName: "container",
labelSetKey: "labelSetKey",
labelSetMap: nil,
}: &AggregateContainerState{},
aggregateStateKey{
namespace: "ns",
containerName: "container2",
labelSetKey: "labelSetKey2",
labelSetMap: nil,
}: &AggregateContainerState{},
},
initialContainerAggregates: ContainerNameToAggregateStateMap{
"container": &AggregateContainerState{},
"container2": &AggregateContainerState{},
},
expectedAggregatesLeft: 2,
expectedInitialAggregatesLeft: 2,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// Populate both maps so every expectation declared in the cases above is asserted.
vpa := Vpa{
aggregateContainerStates: tc.aggregateContainerStates,
ContainersInitialAggregateState: tc.initialContainerAggregates,
}
vpa.DeleteAllAggregatesByContainerName(tc.toDelete)
assert.Equal(t, tc.expectedAggregatesLeft, len(vpa.aggregateContainerStates))
assert.Equal(t, tc.expectedInitialAggregatesLeft, len(vpa.ContainersInitialAggregateState))
})
}
}
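
The expectations above imply that both the per-label-set aggregates and the initial aggregate for the container are dropped. A self-contained sketch with stand-in types (stateKey, aggState, and vpaSketch are hypothetical, simplified versions of the model types):

package main

import "fmt"

type stateKey struct{ namespace, containerName string }
type aggState struct{}

type vpaSketch struct {
	aggregates map[stateKey]*aggState // stands in for aggregateContainerStates
	initial    map[string]*aggState   // stands in for ContainersInitialAggregateState
}

// deleteAllAggregatesByContainerName removes every aggregate keyed by the
// container name, across all label sets, plus the matching initial entry.
func (v *vpaSketch) deleteAllAggregatesByContainerName(name string) {
	for key := range v.aggregates {
		if key.containerName == name {
			delete(v.aggregates, key)
		}
	}
	delete(v.initial, name)
}

func main() {
	v := &vpaSketch{
		aggregates: map[stateKey]*aggState{
			{"ns", "container"}:  {},
			{"ns", "container2"}: {},
		},
		initial: map[string]*aggState{"container": {}, "container2": {}},
	}
	v.deleteAllAggregatesByContainerName("container")
	fmt.Println(len(v.aggregates), len(v.initial)) // 1 1
}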

func TestSetResourcePolicy(t *testing.T) {
scalingModeAuto := vpa_types.ContainerScalingModeAuto
scalingModeOff := vpa_types.ContainerScalingModeOff
