Merge pull request #7236 from damikag/priority-evictor-ds
Make ds pods eviction best effort when draining empty nodes (nodes wit…
k8s-ci-robot committed Sep 4, 2024
2 parents bc49822 + da222a3 commit 386f0f7
Showing 2 changed files with 3 additions and 76 deletions.
7 changes: 2 additions & 5 deletions cluster-autoscaler/core/scaledown/actuation/drain.go
@@ -85,14 +85,11 @@ func (e Evictor) DrainNode(ctx *acontext.AutoscalingContext, nodeInfo *framework
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, pods, dsPods)
}

// EvictDaemonSetPods groups daemonSet pods on the node into priority groups and evicts them in ascending order of priority.
// If the priority evictor is not enabled, eviction of daemonSet pods is best effort.
// EvictDaemonSetPods creates eviction objects for all DaemonSet pods on the node.
// Eviction of DaemonSet pods is best effort. It does not wait for evictions to finish.
func (e Evictor) EvictDaemonSetPods(ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) (map[string]status.PodEvictionResult, error) {
node := nodeInfo.Node()
dsPods, _ := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForEmptyNodes)
if e.fullDsEviction {
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, dsPods, nil)
}
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, nil, dsPods)
}
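
The simplified EvictDaemonSetPods above no longer branches on fullDsEviction: DaemonSet pods are always passed as the last argument to drainNodeWithPodsBasedOnPodPriority, the group whose evictions are issued best effort rather than awaited. The standalone Go sketch below illustrates that split; it is not autoscaler code, and evictPod/drainNode are hypothetical stand-ins for the real drain logic.

```go
// Illustrative sketch only: models the "awaited" vs "best effort" pod groups
// implied by drainNodeWithPodsBasedOnPodPriority(ctx, node, pods, dsPods).
// evictPod and drainNode are made-up names, not cluster-autoscaler APIs.
package main

import (
	"errors"
	"fmt"
)

// evictPod stands in for creating an Eviction object against the API server.
func evictPod(name string) error {
	if name == "stuck-ds-pod" {
		return errors.New("eviction rejected")
	}
	return nil
}

// drainNode fails if any pod in pods cannot be evicted, while dsPods are
// evicted best effort: failures are logged and nothing waits for termination.
func drainNode(pods, dsPods []string) error {
	for _, p := range dsPods {
		if err := evictPod(p); err != nil {
			fmt.Printf("best-effort eviction of DaemonSet pod %s failed: %v\n", p, err)
		}
	}
	for _, p := range pods {
		if err := evictPod(p); err != nil {
			return fmt.Errorf("failed to drain node: %w", err)
		}
	}
	return nil
}

func main() {
	// After this change, DaemonSet pods always land in the best-effort group,
	// so a failed DaemonSet eviction no longer fails the drain of an empty node.
	if err := drainNode(nil, []string{"d1", "stuck-ds-pod"}); err != nil {
		fmt.Println("drain failed:", err)
	} else {
		fmt.Println("drain succeeded despite a DaemonSet eviction failure")
	}
}
```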

72 changes: 1 addition & 71 deletions cluster-autoscaler/core/scaledown/actuation/drain_test.go
@@ -42,7 +42,6 @@ import (
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/types"
)

@@ -57,7 +56,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
evictByDefault bool
extraAnnotationValue map[string]string
expectNotEvicted map[string]struct{}
fullDsEviction bool
podPriorities []int32
}{
{
@@ -84,57 +82,6 @@
extraAnnotationValue: map[string]string{"d1": "false"},
expectNotEvicted: map[string]struct{}{"d1": {}},
},
{
name: "Failed to create DaemonSet eviction",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: false,
err: fmt.Errorf("Failed to drain node /n1, due to following errors"),
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Eviction timeout exceed",
dsPods: []string{"d1", "d2", "d3"},
evictionTimeoutExceed: true,
dsEvictionTimeout: 100 * time.Millisecond,
evictionSuccess: false,
err: fmt.Errorf("Failed to drain node /n1, due to following errors"),
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000, 2000},
},
{
name: "Successful attempt to evict DaemonSet pods",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Evict single pod due to annotation",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
extraAnnotationValue: map[string]string{"d1": "true"},
expectNotEvicted: map[string]struct{}{"d2": {}},
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Don't evict single pod due to annotation",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
evictByDefault: true,
extraAnnotationValue: map[string]string{"d1": "false"},
expectNotEvicted: map[string]struct{}{"d1": {}},
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
}

for _, scenario := range testScenarios {
@@ -160,9 +107,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
for i, dsName := range scenario.dsPods {
ds := BuildTestPod(dsName, 100, 0, WithDSController())
ds.Spec.NodeName = "n1"
if scenario.fullDsEviction {
ds.Spec.Priority = &scenario.podPriorities[i]
}
if v, ok := scenario.extraAnnotationValue[dsName]; ok {
ds.Annotations[daemonset.EnableDsEvictionKey] = v
}
@@ -198,19 +142,9 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
clustersnapshot.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, []*apiv1.Node{n1}, dsPods)

drainConfig := SingleRuleDrainConfig(context.MaxGracefulTerminationSec)
if scenario.fullDsEviction {
drainConfig = []kubelet_config.ShutdownGracePeriodByPodPriority{}
for _, priority := range scenario.podPriorities {
drainConfig = append(drainConfig, kubelet_config.ShutdownGracePeriodByPodPriority{
Priority: priority,
ShutdownGracePeriodSeconds: int64(context.MaxGracefulTerminationSec),
})
}
}
evictor := Evictor{
EvictionRetryTime: waitBetweenRetries,
shutdownGracePeriodByPodPriority: drainConfig,
fullDsEviction: scenario.fullDsEviction,
}
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name)
assert.NoError(t, err)
@@ -232,11 +166,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
for i := 0; i < len(expectEvicted); i++ {
deleted[i] = utils.GetStringFromChan(deletedPods)
}
if scenario.fullDsEviction {
assert.Equal(t, expectEvicted, deleted)
} else {
assert.ElementsMatch(t, deleted, expectEvicted)
}
assert.ElementsMatch(t, deleted, expectEvicted)
})
}
}
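
With eviction of DaemonSet pods always best effort, the order in which evictions land is no longer deterministic, so the test keeps the unordered assert.ElementsMatch comparison and drops the ordered assert.Equal branch that the priority-based path required. A minimal testify sketch of that distinction, separate from the autoscaler test suite (the test and pod names here are made up):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEvictionOrderDoesNotMatter(t *testing.T) {
	expectEvicted := []string{"d1", "d2"}
	deleted := []string{"d2", "d1"} // best-effort evictions may complete in any order

	// assert.Equal(t, expectEvicted, deleted) would fail here because it
	// compares element order as well as contents; ElementsMatch ignores order.
	assert.ElementsMatch(t, deleted, expectEvicted)
}
```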
