diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go
index b8a8aca1ba..6df5edbe13 100644
--- a/pkg/util/testing/wrappers.go
+++ b/pkg/util/testing/wrappers.go
@@ -199,6 +199,11 @@ func (w *WorkloadWrapper) RuntimeClass(name string) *WorkloadWrapper {
 	return w
 }
 
+func (w *WorkloadWrapper) Priority(priority *int32) *WorkloadWrapper {
+	w.Spec.Priority = priority
+	return w
+}
+
 // AdmissionWrapper wraps an Admission
 type AdmissionWrapper struct{ kueue.Admission }
 
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index ab6ada1337..6fcbcf5799 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -462,8 +462,7 @@ var _ = ginkgo.Describe("Scheduler", func() {
 		devBEClusterQ := testing.MakeClusterQueue("dev-be-cq").
 			Cohort("be").
 			Resource(testing.MakeResource(corev1.ResourceCPU).
-				Flavor(testing.MakeFlavor(onDemandFlavor.Name,
-					"5").Max("5").Obj()).
+				Flavor(testing.MakeFlavor(onDemandFlavor.Name, "5").Max("5").Obj()).
 				Obj()).
 			Obj()
 		gomega.Expect(k8sClient.Create(ctx, devBEClusterQ)).Should(gomega.Succeed())
@@ -596,4 +595,40 @@ var _ = ginkgo.Describe("Scheduler", func() {
 		framework.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, prodBEClusterQ.Name, wl1)
 		framework.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, devBEClusterQ.Name, wl2)
 	})
+
+	ginkgo.It("Should schedule workloads by their priority strictly in StrictFIFO", func() {
+		strictFIFOClusterQ := testing.MakeClusterQueue("strict-fifo-cq").
+			QueueingStrategy(kueue.StrictFIFO).
+			Resource(testing.MakeResource(corev1.ResourceCPU).
+				Flavor(testing.MakeFlavor(onDemandFlavor.Name, "5").Max("5").Obj()).
+				Obj()).
+			Obj()
+		gomega.Expect(k8sClient.Create(ctx, strictFIFOClusterQ)).Should(gomega.Succeed())
+		defer func() {
+			gomega.Expect(framework.DeleteClusterQueue(ctx, k8sClient, strictFIFOClusterQ)).Should(gomega.Succeed())
+		}()
+
+		strictFIFOQueue := testing.MakeQueue("strict-fifo-q", ns.Name).ClusterQueue(strictFIFOClusterQ.Name).Obj()
+		ginkgo.By("Creating workloads")
+		wl1 := testing.MakeWorkload("wl1", ns.Name).Queue(strictFIFOQueue.Name).
+			Request(corev1.ResourceCPU, "2").Priority(pointer.Int32(100)).Obj()
+		gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
+		wl2 := testing.MakeWorkload("wl2", ns.Name).Queue(strictFIFOQueue.Name).
+			Request(corev1.ResourceCPU, "5").Priority(pointer.Int32(10)).Obj()
+		gomega.Expect(k8sClient.Create(ctx, wl2)).Should(gomega.Succeed())
+		// wl3 can't be scheduled before wl2 even though there is enough quota.
+		wl3 := testing.MakeWorkload("wl3", ns.Name).Queue(strictFIFOQueue.Name).
+			Request(corev1.ResourceCPU, "1").Priority(pointer.Int32(1)).Obj()
+		gomega.Expect(k8sClient.Create(ctx, wl3)).Should(gomega.Succeed())
+
+		gomega.Expect(k8sClient.Create(ctx, strictFIFOQueue)).Should(gomega.Succeed())
+
+		framework.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, strictFIFOClusterQ.Name, wl1)
+		framework.ExpectWorkloadsToBePending(ctx, k8sClient, wl2)
+		gomega.Consistently(func() bool {
+			lookupKey := types.NamespacedName{Name: wl3.Name, Namespace: wl3.Namespace}
+			gomega.Expect(k8sClient.Get(ctx, lookupKey, wl3)).Should(gomega.Succeed())
+			return wl3.Spec.Admission == nil
+		}, framework.ConsistentDuration, framework.Interval).Should(gomega.Equal(true))
+	})
 })
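
For context on what the new test asserts (this note and sketch are not part of the patch): under StrictFIFO, a ClusterQueue considers workloads strictly in priority order, breaking ties by creation time, and stops admitting at the first workload that does not fit, so wl3 stays pending behind wl2 even though wl3's 1 CPU would fit in the remaining quota. A minimal, self-contained sketch of that ordering; the workload type and less helper are hypothetical, for illustration only, and are not Kueue APIs:

package main

import (
	"fmt"
	"sort"
	"time"
)

type workload struct {
	name     string
	priority int32
	created  time.Time
	cpu      int64 // requested CPU, in whole cores
}

// less picks the queue head the way the StrictFIFO test above relies on:
// higher priority first, ties broken by earlier creation time.
func less(a, b workload) bool {
	if a.priority != b.priority {
		return a.priority > b.priority
	}
	return a.created.Before(b.created)
}

func main() {
	now := time.Now()
	wls := []workload{
		{name: "wl1", priority: 100, created: now, cpu: 2},
		{name: "wl2", priority: 10, created: now.Add(time.Second), cpu: 5},
		{name: "wl3", priority: 1, created: now.Add(2 * time.Second), cpu: 1},
	}
	sort.Slice(wls, func(i, j int) bool { return less(wls[i], wls[j]) })

	quota := int64(5) // the flavor's 5 CPU from the test above
	for _, wl := range wls {
		if wl.cpu > quota {
			// StrictFIFO blocks on the first workload that does not fit;
			// workloads behind it are not considered, even if they would fit.
			fmt.Printf("%s pending; queue head blocks\n", wl.name)
			break
		}
		quota -= wl.cpu
		fmt.Printf("%s admitted, %d CPU left\n", wl.name, quota)
	}
}

The expected output mirrors the test: wl1 admitted with 3 CPU left, wl2 pending, and wl3 never reached.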