Merge branch 'kubernetes-sigs:master' into feat-return-node-fit-error
fanhaouu authored Jun 23, 2024
2 parents a2e871f + cdbd101 commit 666bf4d
Showing 17 changed files with 243 additions and 66 deletions.
4 changes: 2 additions & 2 deletions charts/descheduler/Chart.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
-version: 0.30.0
-appVersion: 0.30.0
+version: 0.30.1
+appVersion: 0.30.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
6 changes: 3 additions & 3 deletions docs/deprecated/v1alpha1.md
@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.30.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.30.1' | kubectl apply -f -
```

Run As A CronJob
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.30.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.30.1' | kubectl apply -f -
```

Run As A Deployment
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.30.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.30.1' | kubectl apply -f -
```

## User Guide
1 change: 1 addition & 0 deletions docs/user-guide.md
@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
+v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
2 changes: 1 addition & 1 deletion kubernetes/cronjob/cronjob.yaml
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
-image: registry.k8s.io/descheduler/descheduler:v0.30.0
+image: registry.k8s.io/descheduler/descheduler:v0.30.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
2 changes: 1 addition & 1 deletion kubernetes/deployment/deployment.yaml
@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
-image: registry.k8s.io/descheduler/descheduler:v0.30.0
+image: registry.k8s.io/descheduler/descheduler:v0.30.1
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"
2 changes: 1 addition & 1 deletion kubernetes/job/job.yaml
@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
-image: registry.k8s.io/descheduler/descheduler:v0.30.0
+image: registry.k8s.io/descheduler/descheduler:v0.30.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
7 changes: 3 additions & 4 deletions pkg/descheduler/descheduler.go
@@ -82,7 +82,7 @@ type descheduler struct {
eventRecorder events.EventRecorder
}

-func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
+func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
@@ -388,7 +388,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
defer span.End()

sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
-nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()

var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
@@ -404,7 +403,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()

-descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
+descheduler, err := newDescheduler(rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
@@ -419,7 +418,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
-nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
+nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.nodeLister, nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
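The descheduler.go change above drops the unused ctx parameter from newDescheduler and removes the extra local node lister in RunDeschedulerStrategies, reusing the lister that the descheduler struct already holds. A minimal sketch of that pattern, with simplified types that are assumptions rather than the package's real internals:

```go
package descheduler

import (
	"k8s.io/client-go/informers"
	listersv1 "k8s.io/client-go/listers/core/v1"
)

// descheduler is a stand-in for the real struct in pkg/descheduler; only the
// nodeLister field relevant to this diff is shown.
type descheduler struct {
	nodeLister listersv1.NodeLister
}

// newDescheduler mirrors the new signature: no context parameter, and the node
// lister is captured once from the shared informer factory, so callers reuse
// d.nodeLister instead of building a second lister from the same factory.
func newDescheduler(factory informers.SharedInformerFactory) *descheduler {
	return &descheduler{
		nodeLister: factory.Core().V1().Nodes().Lister(),
	}
}
```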
48 changes: 48 additions & 0 deletions pkg/framework/plugins/removeduplicates/validation_test.go
@@ -0,0 +1,48 @@
package removeduplicates

import (
	"testing"

	"sigs.k8s.io/descheduler/pkg/api"
)

func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
	testCases := []struct {
		description string
		args        *RemoveDuplicatesArgs
		expectError bool
	}{
		{
			description: "valid namespace args, no errors",
			args: &RemoveDuplicatesArgs{
				ExcludeOwnerKinds: []string{"Job"},
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
				},
			},
			expectError: false,
		},
		{
			description: "invalid namespaces args, expects error",
			args: &RemoveDuplicatesArgs{
				ExcludeOwnerKinds: []string{"Job"},
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
					Exclude: []string{"kube-system"},
				},
			},
			expectError: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			err := ValidateRemoveDuplicatesArgs(tc.args)

			hasError := err != nil
			if tc.expectError != hasError {
				t.Error("unexpected arg validation behavior")
			}
		})
	}
}
72 changes: 72 additions & 0 deletions pkg/framework/plugins/removefailedpods/validation_test.go
@@ -0,0 +1,72 @@
package removefailedpods

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/descheduler/pkg/api"
)

func TestValidateRemoveFailedPodsArgs(t *testing.T) {
	var oneHourPodLifetimeSeconds uint = 3600
	testCases := []struct {
		description string
		args        *RemoveFailedPodsArgs
		expectError bool
	}{
		{
			description: "valid namespace args, no errors",
			args: &RemoveFailedPodsArgs{
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
				},
				ExcludeOwnerKinds:     []string{"Job"},
				Reasons:               []string{"ReasonDoesNotMatch"},
				MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds,
			},
			expectError: false,
		},
		{
			description: "invalid namespaces args, expects error",
			args: &RemoveFailedPodsArgs{
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
					Exclude: []string{"kube-system"},
				},
			},
			expectError: true,
		},
		{
			description: "valid label selector args, no errors",
			args: &RemoveFailedPodsArgs{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
				},
			},
			expectError: false,
		},
		{
			description: "invalid label selector args, expects errors",
			args: &RemoveFailedPodsArgs{
				LabelSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{
						{
							Operator: metav1.LabelSelectorOpIn,
						},
					},
				},
			},
			expectError: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			err := ValidateRemoveFailedPodsArgs(tc.args)
			hasError := err != nil
			if tc.expectError != hasError {
				t.Error("unexpected arg validation behavior")
			}
		})
	}
}
68 changes: 68 additions & 0 deletions pkg/framework/plugins/removepodsviolatinginterpodantiaffinity/validation_test.go
@@ -0,0 +1,68 @@
package removepodsviolatinginterpodantiaffinity

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/descheduler/pkg/api"
)

func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
	testCases := []struct {
		description string
		args        *RemovePodsViolatingInterPodAntiAffinityArgs
		expectError bool
	}{
		{
			description: "valid namespace args, no errors",
			args: &RemovePodsViolatingInterPodAntiAffinityArgs{
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
				},
			},
			expectError: false,
		},
		{
			description: "invalid namespaces args, expects error",
			args: &RemovePodsViolatingInterPodAntiAffinityArgs{
				Namespaces: &api.Namespaces{
					Include: []string{"default"},
					Exclude: []string{"kube-system"},
				},
			},
			expectError: true,
		},
		{
			description: "valid label selector args, no errors",
			args: &RemovePodsViolatingInterPodAntiAffinityArgs{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
				},
			},
			expectError: false,
		},
		{
			description: "invalid label selector args, expects errors",
			args: &RemovePodsViolatingInterPodAntiAffinityArgs{
				LabelSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{
						{
							Operator: metav1.LabelSelectorOpIn,
						},
					},
				},
			},
			expectError: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			err := ValidateRemovePodsViolatingInterPodAntiAffinityArgs(tc.args)
			hasError := err != nil
			if tc.expectError != hasError {
				t.Error("unexpected arg validation behavior")
			}
		})
	}
}
4 changes: 4 additions & 0 deletions pkg/utils/qos.go
@@ -20,6 +20,10 @@ func isSupportedQoSComputeResource(name v1.ResourceName) bool {
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
+if len(pod.Status.QOSClass) != 0 {
+return pod.Status.QOSClass
+}
+
requests := v1.ResourceList{}
limits := v1.ResourceList{}
zeroQuantity := resource.MustParse("0")
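The pkg/utils/qos.go change above makes GetPodQOS return the QoS class already recorded by the kubelet in pod.Status.QOSClass, falling back to computing it from container requests and limits only when that field is empty. A small usage sketch; the pod literal is illustrative, not taken from the repository:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/utils"
)

func main() {
	// A pod read from the API server typically already carries a QoS class.
	pod := &v1.Pod{
		Status: v1.PodStatus{QOSClass: v1.PodQOSBurstable},
	}

	// With the change above, the recorded class is returned directly instead
	// of being recomputed from the pod spec.
	fmt.Println(utils.GetPodQOS(pod)) // Burstable
}
```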
3 changes: 1 addition & 2 deletions test/e2e/e2e_duplicatepods_test.go
@@ -41,8 +41,7 @@ import (
func TestRemoveDuplicates(t *testing.T) {
ctx := context.Background()

-clientSet, sharedInformerFactory, _, getPodsAssignedToNode, stopCh := initializeClient(t)
-defer close(stopCh)
+clientSet, sharedInformerFactory, _, getPodsAssignedToNode := initializeClient(ctx, t)

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
5 changes: 2 additions & 3 deletions test/e2e/e2e_failedpods_test.go
@@ -24,8 +24,7 @@ var oneHourPodLifetimeSeconds uint = 3600

func TestFailedPods(t *testing.T) {
ctx := context.Background()
-clientSet, sharedInformerFactory, _, getPodsAssignedToNode, stopCh := initializeClient(t)
-defer close(stopCh)
+clientSet, sharedInformerFactory, _, getPodsAssignedToNode := initializeClient(ctx, t)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
@@ -154,7 +153,7 @@ func initFailedJob(name, namespace string) *batchv1.Job {

func waitForJobPodPhase(ctx context.Context, t *testing.T, clientSet clientset.Interface, job *batchv1.Job, phase v1.PodPhase) {
podClient := clientSet.CoreV1().Pods(job.Namespace)
-if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
+if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
t.Log(labels.FormatLabels(job.Labels))
if podList, err := podClient.List(ctx, metav1.ListOptions{LabelSelector: labels.FormatLabels(job.Labels)}); err != nil {
return false, err
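The e2e change above also swaps the deprecated wait.PollImmediate for wait.PollUntilContextTimeout, which threads a context into the condition function. A self-contained sketch of the new call shape; the condition body is a placeholder, not the test's real check:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Poll every 5s, give up after 30s; the `true` argument requests an
	// immediate first attempt, matching the old PollImmediate behavior.
	err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder condition: succeed on the first attempt.
			return true, nil
		})
	fmt.Println(err)
}
```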
3 changes: 1 addition & 2 deletions test/e2e/e2e_leaderelection_test.go
@@ -41,8 +41,7 @@ func TestLeaderElection(t *testing.T) {
descheduler.SetupPlugins()
ctx := context.Background()

-clientSet, _, _, _, stopCh := initializeClient(t)
-defer close(stopCh)
+clientSet, _, _, _ := initializeClient(ctx, t)

ns1 := "e2e-" + strings.ToLower(t.Name()+"-a")
ns2 := "e2e-" + strings.ToLower(t.Name()+"-b")
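Across these e2e tests, initializeClient now takes the test context and no longer returns a stop channel for the caller to close. A rough sketch of that context-driven lifecycle, assuming the helper ties informer shutdown to the context via t.Cleanup (the real helper lives in the e2e package and does more):

```go
package e2e

import (
	"context"
	"testing"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// initializeClientSketch is a hypothetical, simplified version of the helper:
// instead of handing back a stop channel, shutdown is derived from the test
// context, and t.Cleanup cancels it when the test finishes.
func initializeClientSketch(ctx context.Context, t *testing.T, clientSet kubernetes.Interface) informers.SharedInformerFactory {
	ctx, cancel := context.WithCancel(ctx)
	t.Cleanup(cancel)

	factory := informers.NewSharedInformerFactory(clientSet, 0)
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())
	return factory
}
```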
(The remaining 3 changed files are not shown.)
