Merge pull request kubernetes-sigs#2 from sjenning/sync-upstream
pick upstream fixes
aveshagarwal committed Apr 26, 2018
2 parents e42e372 + 3b92d38 commit 065778e
Showing 6 changed files with 44 additions and 49 deletions.
2 changes: 1 addition & 1 deletion pkg/descheduler/strategies/duplicates.go
@@ -54,7 +54,7 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
         glog.V(1).Infof("%#v", creator)
         // i = 0 does not evict the first pod
         for i := 1; i < len(pods); i++ {
-            if nodepodCount[node]+1 > maxPodsToEvict {
+            if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
                 break
             }
             success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
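The changed condition above makes a maxPodsToEvict of 0 mean "no per-node eviction cap" rather than "evict nothing"; the same guard is repeated in lownodeutilization.go, node_affinity.go, and pod_antiaffinity.go below. A minimal, standalone sketch of that semantics, using a hypothetical limitReached helper that is not part of the descheduler code:

package main

import "fmt"

// limitReached reports whether evicting one more pod from a node would exceed
// the per-node cap. A cap of 0 is treated as "unlimited", mirroring the guard
// added in the diff above. Hypothetical helper for illustration only.
func limitReached(evictedOnNode, maxPodsToEvict int) bool {
	return maxPodsToEvict > 0 && evictedOnNode+1 > maxPodsToEvict
}

func main() {
	fmt.Println(limitReached(3, 0)) // false: no cap, keep evicting
	fmt.Println(limitReached(2, 3)) // false: a third eviction stays within a cap of 3
	fmt.Println(limitReached(3, 3)) // true: a fourth eviction would exceed a cap of 3
}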
2 changes: 1 addition & 1 deletion pkg/descheduler/strategies/lownodeutilization.go
@@ -218,7 +218,7 @@ func evictPods(inputPods []*v1.Pod,
     if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
         onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
         for _, pod := range inputPods {
-            if *podsEvicted+1 > maxPodsToEvict {
+            if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
                 break
             }
             cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
8 changes: 4 additions & 4 deletions pkg/descheduler/strategies/node_affinity.go
@@ -27,8 +27,7 @@ import (
 )
 
 func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
-    evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
-    glog.V(1).Infof("Evicted %v pods", evictionCount)
+    removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
 }
 
 func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
@@ -51,14 +50,14 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
         }
 
         for _, pod := range pods {
-            if nodepodCount[node]+1 > maxPodsToEvict {
+            if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
                 break
             }
             if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 
                 if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
                     glog.V(1).Infof("Evicting pod: %v", pod.Name)
-                    evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, false)
+                    evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
                     nodepodCount[node]++
                 }
             }
@@ -70,5 +69,6 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
             return evictedPodCount
         }
     }
+    glog.V(1).Infof("Evicted %v pods", evictedPodCount)
     return evictedPodCount
 }
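Besides the cap guard, this file picks up two behavior fixes: EvictPod now receives ds.DryRun instead of a hard-coded false, and the "Evicted N pods" summary moves into removePodsViolatingNodeAffinityCount so it is logged once, after every node has been processed. A rough standalone sketch of that shape (evictPod and removeViolatingPods are hypothetical stand-ins, not the descheduler API):

package main

import "log"

// evictPod stands in for evictions.EvictPod: in dry-run mode it only reports
// what would happen. Hypothetical helper for illustration only.
func evictPod(name string, dryRun bool) bool {
	if dryRun {
		log.Printf("dry run: would evict %s", name)
		return true
	}
	log.Printf("evicting %s", name)
	return true
}

// removeViolatingPods mirrors the shape of the fix: the caller's dry-run
// setting is passed through to every eviction, and the summary is logged
// once, after all nodes have been processed.
func removeViolatingPods(podsByNode map[string][]string, dryRun bool) int {
	evicted := 0
	for _, pods := range podsByNode {
		for _, p := range pods {
			if evictPod(p, dryRun) {
				evicted++
			}
		}
	}
	log.Printf("Evicted %v pods", evicted)
	return evicted
}

func main() {
	removeViolatingPods(map[string][]string{"node1": {"p1", "p2"}}, true)
}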
8 changes: 4 additions & 4 deletions pkg/descheduler/strategies/node_affinity_test.go
@@ -143,16 +143,16 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
             npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
-            maxPodsToEvict: 1,
+            maxPodsToEvict: 0,
         },
         {
-            description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 0, should not be evicted",
-            expectedEvictedPodCount: 0,
+            description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should be evicted",
+            expectedEvictedPodCount: 1,
             strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
             npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
-            maxPodsToEvict: 0,
+            maxPodsToEvict: 1,
         },
         {
             description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
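The two table entries swap their settings to match the new semantics: the case with maxPodsToEvict set to 0 now expects an eviction (0 means unlimited), and the case with 1 expects exactly one. A table-driven sketch of the cap check in isolation, reusing the hypothetical limitReached helper from above (this would live in a *_test.go file):

package main

import "testing"

// limitReached is the hypothetical cap check sketched earlier; 0 means unlimited.
func limitReached(evictedOnNode, maxPodsToEvict int) bool {
	return maxPodsToEvict > 0 && evictedOnNode+1 > maxPodsToEvict
}

// TestEvictionCap exercises the new cap semantics in isolation.
func TestEvictionCap(t *testing.T) {
	tests := []struct {
		description string
		evicted     int
		limit       int
		want        bool
	}{
		{"cap 0 never blocks", 5, 0, false},
		{"cap 1 allows the first eviction", 0, 1, false},
		{"cap 1 blocks the second eviction", 1, 1, true},
	}
	for _, tc := range tests {
		if got := limitReached(tc.evicted, tc.limit); got != tc.want {
			t.Errorf("%s: got %v, want %v", tc.description, got, tc.want)
		}
	}
}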
2 changes: 1 addition & 1 deletion pkg/descheduler/strategies/pod_antiaffinity.go
@@ -49,7 +49,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
     }
     totalPods := len(pods)
     for i := 0; i < totalPods; i++ {
-        if nodePodCount[node]+1 > maxPodsToEvict {
+        if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
             break
         }
         if checkPodsWithAntiAffinityExist(pods[i], pods) {
71 changes: 33 additions & 38 deletions pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -32,29 +32,43 @@ func TestPodAntiAffinity(t *testing.T) {
     p1 := test.BuildTestPod("p1", 100, 0, node.Name)
     p2 := test.BuildTestPod("p2", 100, 0, node.Name)
     p3 := test.BuildTestPod("p3", 100, 0, node.Name)
+    p3.Labels = map[string]string{"foo": "bar"}
+    p4 := test.BuildTestPod("p4", 100, 0, node.Name)
     p2.Labels = map[string]string{"foo": "bar"}
     p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
     p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
     p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-    p1.Spec.Affinity = &v1.Affinity{
-        PodAntiAffinity: &v1.PodAntiAffinity{
-            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
-                {
-                    LabelSelector: &metav1.LabelSelector{
-                        MatchExpressions: []metav1.LabelSelectorRequirement{
-                            {
-                                Key: "foo",
-                                Operator: metav1.LabelSelectorOpIn,
-                                Values: []string{"bar"},
-                            },
-                        },
-                    },
-                    TopologyKey: "region",
-                },
-            },
-        },
-    }
+    p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+
+    // set pod anti affinity
+    setPodAntiAffinity(p1)
+    setPodAntiAffinity(p3)
+    setPodAntiAffinity(p4)
+
+    // create fake client
+    fakeClient := &fake.Clientset{}
+    fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
+        return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
+    })
+    fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
+        return true, node, nil
+    })
+    npe := nodePodEvictedCount{}
+    npe[node] = 0
+    expectedEvictedPodCount := 3
+    podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
+    if podsEvicted != expectedEvictedPodCount {
+        t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
+    }
-    p3.Spec.Affinity = &v1.Affinity{
+    npe[node] = 0
+    expectedEvictedPodCount = 1
+    podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
+    if podsEvicted != expectedEvictedPodCount {
+        t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
+    }
+}
+
+func setPodAntiAffinity(inputPod *v1.Pod) {
+    inputPod.Spec.Affinity = &v1.Affinity{
         PodAntiAffinity: &v1.PodAntiAffinity{
             RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                 {
@@ -72,23 +86,4 @@ func TestPodAntiAffinity(t *testing.T) {
             },
         },
     }
-    fakeClient := &fake.Clientset{}
-    fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-        return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3}}, nil
-    })
-    fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-        return true, node, nil
-    })
-    npe := nodePodEvictedCount{}
-    npe[node] = 0
-    expectedEvictedPodCount := 0
-    podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
-    if podsEvicted != expectedEvictedPodCount {
-        t.Errorf("Unexpected no of pods evicted")
-    }
-    expectedEvictedPodCount = 1
-    podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
-    if podsEvicted != expectedEvictedPodCount {
-        t.Errorf("Unexpected no of pods evicted")
-    }
 }
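The rewritten test builds four pods, applies the anti-affinity term to p1, p3, and p4 through the new setPodAntiAffinity helper, and asserts 3 evictions with no cap and 1 with a cap of one. A rough sketch of why three pods count as violations when they share a node with a pod labeled foo=bar (the pod type and violatesAntiAffinity below are hypothetical simplifications, not the checkPodsWithAntiAffinityExist implementation):

package main

import "fmt"

// pod is a stripped-down stand-in for v1.Pod: just labels and an optional
// anti-affinity label selector expressed as key=value pairs.
type pod struct {
	name         string
	labels       map[string]string
	antiAffinity map[string]string // nil means no anti-affinity term
}

// violatesAntiAffinity reports whether p's anti-affinity selector matches the
// labels of any other pod in the list, roughly the question the strategy asks
// for pods co-located on one node.
func violatesAntiAffinity(p pod, all []pod) bool {
	if p.antiAffinity == nil {
		return false
	}
	for _, other := range all {
		if other.name == p.name {
			continue
		}
		for k, v := range p.antiAffinity {
			if other.labels[k] == v {
				return true
			}
		}
	}
	return false
}

func main() {
	sel := map[string]string{"foo": "bar"}
	pods := []pod{
		{name: "p1", antiAffinity: sel},
		{name: "p2", labels: map[string]string{"foo": "bar"}},
		{name: "p3", labels: map[string]string{"foo": "bar"}, antiAffinity: sel},
		{name: "p4", antiAffinity: sel},
	}
	violations := 0
	for _, p := range pods {
		if violatesAntiAffinity(p, pods) {
			violations++
		}
	}
	fmt.Println(violations) // 3: p1, p3, and p4 conflict with p2's (and p3's) labels
}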
