From 4da18623e4efbb4a49ce500d6e4d504cd17cebc6 Mon Sep 17 00:00:00 2001
From: Mike Dame
Date: Thu, 11 Mar 2021 10:04:59 -0500
Subject: [PATCH] (TopologySpread) Evict pods with selectors that match
 multiple nodes

---
 .../strategies/topologyspreadconstraint.go      |  6 +--
 .../topologyspreadconstraint_test.go            | 50 +++++++++++++++++++
 2 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/pkg/descheduler/strategies/topologyspreadconstraint.go b/pkg/descheduler/strategies/topologyspreadconstraint.go
index 52147425a5..bda97f1b20 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint.go
+++ b/pkg/descheduler/strategies/topologyspreadconstraint.go
@@ -289,11 +289,11 @@ func balanceDomains(
 			// however we still account for it "being evicted" so the algorithm can complete
 			// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
 			// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
-			if aboveToEvict[k].Spec.NodeSelector != nil ||
+			if (aboveToEvict[k].Spec.NodeSelector != nil ||
 				(aboveToEvict[k].Spec.Affinity != nil &&
 					aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
-					aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
-					nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
+					aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil)) &&
+				nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0 {
 				klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
 				continue
 			}
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint_test.go b/pkg/descheduler/strategies/topologyspreadconstraint_test.go
index 93bc6d931c..f8c3370335 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint_test.go
+++ b/pkg/descheduler/strategies/topologyspreadconstraint_test.go
@@ -281,6 +281,56 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			strategy:   api.DeschedulerStrategy{},
 			namespaces: []string{"ns1"},
 		},
+		{
+			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneA"
+					n.Labels["region"] = "boston"
+				}),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneB"
+					n.Labels["region"] = "boston"
+				}),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:  1,
+					node:   "n1",
+					labels: map[string]string{"foo": "bar"},
+					constraints: []v1.TopologySpreadConstraint{
+						{
+							MaxSkew:           1,
+							TopologyKey:       "zone",
+							WhenUnsatisfiable: v1.DoNotSchedule,
+							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+						},
+					},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+			}),
+			expectedEvictedCount: 2,
+			strategy:             api.DeschedulerStrategy{},
+			namespaces:           []string{"ns1"},
+		},
 		{
 			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
 			nodes: []*v1.Node{