From c2d3d12dc84730831f8bbf2e73fc7d879529d5d4 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Tue, 12 Sep 2023 17:12:12 +0300 Subject: [PATCH] [admissionChecks] Add preemption controller --- pkg/constants/constants.go | 2 + .../admissionchecks/preemption/preemption.go | 263 ++++++++++++ .../preemption/preemption_test.go | 377 ++++++++++++++++++ pkg/util/testing/wrappers.go | 15 + 4 files changed, 657 insertions(+) create mode 100644 pkg/controller/admissionchecks/preemption/preemption.go create mode 100644 pkg/controller/admissionchecks/preemption/preemption_test.go diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index d56ac40081..47ca0504a5 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -34,4 +34,6 @@ const ( // that do not specify any priority class and there is no priority class // marked as default. DefaultPriority = 0 + + PreemptionAdmissionCheckName = "kueue-preemption" ) diff --git a/pkg/controller/admissionchecks/preemption/preemption.go b/pkg/controller/admissionchecks/preemption/preemption.go new file mode 100644 index 0000000000..9ec0c3b932 --- /dev/null +++ b/pkg/controller/admissionchecks/preemption/preemption.go @@ -0,0 +1,263 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package preemption + +import ( + "context" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" + "sigs.k8s.io/kueue/pkg/cache" + "sigs.k8s.io/kueue/pkg/constants" + "sigs.k8s.io/kueue/pkg/scheduler/preemption" + "sigs.k8s.io/kueue/pkg/workload" +) + +const ( + controllerName = "PreemptionController" + throttleTimeout = 500 * time.Millisecond +) + +type updateStatusFnc func(context.Context, client.Client, *kueue.Workload, bool) error + +type Controller struct { + log logr.Logger + cache *cache.Cache + client client.Client + recorder record.EventRecorder + preemptor *preemption.Preemptor + ctx context.Context + triggerChan chan struct{} + + // stub, only for test + updateFnc updateStatusFnc +} + +func NewController(cache *cache.Cache) *Controller { + return &Controller{ + log: ctrl.Log.WithName(controllerName), + cache: cache, + triggerChan: make(chan struct{}), + updateFnc: updateCheckStatus, + } +} + +func (c *Controller) SetupWithManager(mgr ctrl.Manager) error { + c.client = mgr.GetClient() + c.recorder = mgr.GetEventRecorderFor(controllerName) + c.preemptor = preemption.New(c.client, c.recorder) + if err := mgr.Add(c); err != nil { + return err + } + return ctrl.NewControllerManagedBy(mgr). + //TODO: filter the events + For(&kueue.Workload{}). 
+ Complete(c) +} + +var _ manager.Runnable = (*Controller)(nil) +var _ reconcile.Reconciler = (*Controller)(nil) + +func (c *Controller) Start(ctx context.Context) error { + c.log.V(5).Info("Staring main loop") + c.ctx = ctx + ticker := time.NewTicker(throttleTimeout) + trigger := false + timeout := false + for { + select { + case <-c.triggerChan: + trigger = true + case <-ticker.C: + timeout = true + case <-ctx.Done(): + c.log.V(5).Info("End main loop") + return nil + } + + if trigger && timeout { + c.run() + trigger = false + timeout = false + } + } +} + +func (c *Controller) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) { + select { + case c.triggerChan <- struct{}{}: + c.log.V(6).Info("Triggered") + default: + } + return ctrl.Result{}, nil +} + +func (c *Controller) run() { + snapshot := c.cache.Snapshot() + workloads := filterWorkloads(&snapshot) + for _, wl := range workloads { + //1. remove it from Snapshot + snapshot.RemoveWorkload(wl) + //2. check if preemption is needed + usage := totalRequestsForWorkload(wl) + needPreemption := resourcesNeedingPreemption(wl, usage, &snapshot) + log := c.log.WithValues("workload", klog.KObj(wl.Obj)) + + if len(needPreemption) == 0 { + // 2.1 - set the check to true + // the preemption is done , flip the condition + if err := c.updateFnc(c.ctx, c.client, wl.Obj, true); err != nil { + log.V(2).Error(err, "Unable to update the check state to True") + } else { + log.V(2).Info("Preemption ended") + } + } else { + // 2.2 - issue eviction + targets := c.preemptor.GetTargetsForResources(wl, needPreemption, usage, &snapshot) + if len(targets) == 0 { + //2.2.1 - preemption is no longer an option, flip the condition to false + if err := c.updateFnc(c.ctx, c.client, wl.Obj, false); err != nil { + log.V(2).Error(err, "Unable to update the check state to False") + } else { + log.V(2).Info("Preemption is no longer possible") + } + } else { + count, err := c.preemptor.IssuePreemptions(c.ctx, targets, 
snapshot.ClusterQueues[wl.ClusterQueue]) + if err != nil { + log.V(5).Error(err, "Unable to issue preemption") + } else { + log.V(5).Info("Preemption triggered", "count", count) + } + } + } + // 3 add it back to the Snapshot + snapshot.AddWorkload(wl) + } +} + +func filterWorkloads(snapsot *cache.Snapshot) []*workload.Info { + ret := []*workload.Info{} + for _, cq := range snapsot.ClusterQueues { + for _, wl := range cq.Workloads { + // if the workload has the preemption check set to unknown + if apimeta.IsStatusConditionPresentAndEqual(wl.Obj.Status.AdmissionChecks, constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown) { + checkNow := true + for i := range wl.Obj.Status.AdmissionChecks { + c := &wl.Obj.Status.AdmissionChecks[i] + if c.Type == constants.PreemptionAdmissionCheckName { + continue + } + if snapsot.AdmissionChecks[c.Type] != kueue.AfterCheckPassedOrOnDemand { + continue + } + if c.Reason != kueue.CheckStateReady && c.Reason != kueue.CheckStatePreemptionRequired { + checkNow = false + break + } + } + if checkNow { + ret = append(ret, wl) + } + } + } + } + return ret +} + +func totalRequestsForWorkload(wl *workload.Info) cache.FlavorResourceQuantities { + usage := make(cache.FlavorResourceQuantities) + for _, ps := range wl.TotalRequests { + for res, q := range ps.Requests { + flv := ps.Flavors[res] + resUsage := usage[flv] + if resUsage == nil { + resUsage = make(map[corev1.ResourceName]int64) + usage[flv] = resUsage + } + resUsage[res] += q + } + } + return usage +} + +func resourcesNeedingPreemption(wl *workload.Info, usage cache.FlavorResourceQuantities, snap *cache.Snapshot) preemption.ResourcesPerFlavor { + ret := make(preemption.ResourcesPerFlavor) + + cq := snap.ClusterQueues[wl.ClusterQueue] + for _, rg := range cq.ResourceGroups { + for _, flvQuotas := range rg.Flavors { + flvReq, found := usage[flvQuotas.Name] + if !found { + // Workload doesn't request this flavor. 
+ continue + } + cqResUsage := cq.Usage[flvQuotas.Name] + var cohortResUsage, cohortResRequestable map[corev1.ResourceName]int64 + if cq.Cohort != nil { + cohortResUsage = cq.Cohort.Usage[flvQuotas.Name] + cohortResRequestable = cq.Cohort.RequestableResources[flvQuotas.Name] + } + for rName, rReq := range flvReq { + limit := flvQuotas.Resources[rName].Nominal + if flvQuotas.Resources[rName].BorrowingLimit != nil { + limit += *flvQuotas.Resources[rName].BorrowingLimit + } + exceedsNominal := cqResUsage[rName]+rReq > limit + exceedsBorrowing := cq.Cohort != nil && cohortResUsage[rName]+rReq > cohortResRequestable[rName] + if exceedsNominal || exceedsBorrowing { + if _, found := ret[flvQuotas.Name]; !found { + ret[flvQuotas.Name] = sets.New(rName) + } else { + ret[flvQuotas.Name].Insert(rName) + } + } + } + } + } + return ret +} + +func updateCheckStatus(ctx context.Context, c client.Client, wl *kueue.Workload, successful bool) error { + cond := metav1.Condition{ + Type: constants.PreemptionAdmissionCheckName, + Status: metav1.ConditionTrue, + Reason: kueue.CheckStateReady, // the reason is not relevant, add this to keep it aligned wit the other checks + Message: "Preemption done", + } + + if !successful { + cond.Status = metav1.ConditionFalse + cond.Reason = kueue.CheckStateRetry + cond.Message = "Preemption is not possible" + } + + patch := workload.BaseSSAWorkload(wl) + apimeta.SetStatusCondition(&patch.Status.AdmissionChecks, cond) + return c.Status().Patch(ctx, patch, client.Apply, client.FieldOwner(controllerName), client.ForceOwnership) +} diff --git a/pkg/controller/admissionchecks/preemption/preemption_test.go b/pkg/controller/admissionchecks/preemption/preemption_test.go new file mode 100644 index 0000000000..595fde3ec7 --- /dev/null +++ b/pkg/controller/admissionchecks/preemption/preemption_test.go @@ -0,0 +1,377 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package preemption + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" + "sigs.k8s.io/kueue/pkg/cache" + "sigs.k8s.io/kueue/pkg/constants" + "sigs.k8s.io/kueue/pkg/scheduler/preemption" + utiltesting "sigs.k8s.io/kueue/pkg/util/testing" +) + +type preemptionUpdateRecord struct { + Workload string + State bool +} + +const ( + testingNamespace = "ns1" + testingCQName = "cq1" + testingFlavorName = "rf1" +) + +func TestOneRun(t *testing.T) { + + rf1 := utiltesting.MakeResourceFlavor(testingFlavorName).Obj() + + qCPU2b2 := utiltesting.MakeClusterQueue(testingCQName). + ResourceGroup( + *utiltesting.MakeFlavorQuotas(testingFlavorName).Resource(corev1.ResourceCPU, "2", "2").Obj(), + ). + Preemption(kueue.ClusterQueuePreemption{ + ReclaimWithinCohort: kueue.PreemptionPolicyAny, + WithinClusterQueue: kueue.PreemptionPolicyLowerPriority, + }). + Cohort("c1"). + Obj() + + resourceFlavors := []*kueue.ResourceFlavor{rf1} + clusterQueues := []*kueue.ClusterQueue{qCPU2b2} + + otherWl := utiltesting.MakeWorkload("wl2", testingNamespace). + Request(corev1.ResourceCPU, "2"). + Priority(2). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). 
+ PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionTrue, kueue.CheckStateReady). + Obj() + + cases := map[string]struct { + workloads []*kueue.Workload + admissionChecks []*kueue.AdmissionCheck + + wantEvents []preemptionUpdateRecord + wantEvictedWorkloads []string + }{ + "nothing to do": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionTrue, kueue.CheckStateReady). + Obj(), + }, + }, + "finish the preemption": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). 
+ Obj(), + }, + wantEvents: []preemptionUpdateRecord{ + {Workload: "ns1/wl1", State: true}, + }, + }, + "the preemption is no longer possible": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(1). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + Obj(), + otherWl.DeepCopy(), + }, + wantEvents: []preemptionUpdateRecord{ + {Workload: "ns1/wl1", State: false}, + }, + }, + "the eviction is triggered": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(3). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + Obj(), + otherWl.DeepCopy(), + }, + wantEvictedWorkloads: []string{"ns1/wl2"}, + }, + "AfterCheckPassedOrOnDemand Pending": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(3). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). 
+ PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + SetOrReplaceAdmissionCheck("check1", metav1.ConditionUnknown, kueue.CheckStatePending). + Obj(), + otherWl.DeepCopy(), + }, + admissionChecks: []*kueue.AdmissionCheck{ + utiltesting.MakeAdmissionCheck("check1").Policy(kueue.AfterCheckPassedOrOnDemand).Obj(), + }, + }, + "AfterCheckPassedOrOnDemand CheckStatePreemptionRequired": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(3). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + SetOrReplaceAdmissionCheck("check1", metav1.ConditionUnknown, kueue.CheckStatePreemptionRequired). + Obj(), + otherWl.DeepCopy(), + }, + admissionChecks: []*kueue.AdmissionCheck{ + utiltesting.MakeAdmissionCheck("check1").Policy(kueue.AfterCheckPassedOrOnDemand).Obj(), + }, + wantEvictedWorkloads: []string{"ns1/wl2"}, + }, + "AfterCheckPassedOrOnDemand Ready": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(3). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). 
+ PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + SetOrReplaceAdmissionCheck("check1", metav1.ConditionTrue, kueue.CheckStateReady). + Obj(), + otherWl.DeepCopy(), + }, + admissionChecks: []*kueue.AdmissionCheck{ + utiltesting.MakeAdmissionCheck("check1").Policy(kueue.AfterCheckPassedOrOnDemand).Obj(), + }, + wantEvictedWorkloads: []string{"ns1/wl2"}, + }, + "Anytime Pending": { + workloads: []*kueue.Workload{ + utiltesting.MakeWorkload("wl1", testingNamespace). + Request(corev1.ResourceCPU, "1"). + Priority(3). + ReserveQuota( + utiltesting.MakeAdmission(testingCQName). + PodSets( + kueue.PodSetAssignment{ + Name: "main", + Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{ + corev1.ResourceCPU: testingFlavorName, + }, + ResourceUsage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + Count: ptr.To[int32](1), + }, + ). + Obj(), + ). + SetOrReplaceAdmissionCheck(constants.PreemptionAdmissionCheckName, metav1.ConditionUnknown, kueue.CheckStatePending). + SetOrReplaceAdmissionCheck("check1", metav1.ConditionUnknown, kueue.CheckStatePending). 
+ Obj(), + otherWl.DeepCopy(), + }, + admissionChecks: []*kueue.AdmissionCheck{ + utiltesting.MakeAdmissionCheck("check1").Policy(kueue.Anytime).Obj(), + }, + wantEvictedWorkloads: []string{"ns1/wl2"}, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + ctx, _ := utiltesting.ContextWithLog(t) + fakeClient := utiltesting.NewFakeClient() + cache := cache.New(fakeClient) + for _, rf := range resourceFlavors { + cache.AddOrUpdateResourceFlavor(rf.DeepCopy()) + } + + for _, ac := range tc.admissionChecks { + cache.AddOrUpdateAdmissionCheck(ac.DeepCopy()) + } + + for _, cq := range clusterQueues { + cache.AddClusterQueue(ctx, cq.DeepCopy()) + } + + for _, wl := range tc.workloads { + cache.AddOrUpdateWorkload(wl) + } + + // create the controller + controller := NewController(cache) + + // setup any additional parts + controller.ctx = ctx + controller.preemptor = preemption.New(fakeClient, record.NewFakeRecorder(10)) + var updates []preemptionUpdateRecord + controller.updateFnc = func(_ context.Context, _ client.Client, wl *kueue.Workload, successful bool) error { + updates = append(updates, preemptionUpdateRecord{Workload: client.ObjectKeyFromObject(wl).String(), State: successful}) + return nil + } + + var evictedWorkloads []string + controller.preemptor.OverrideApply(func(_ context.Context, wl *kueue.Workload) error { + evictedWorkloads = append(evictedWorkloads, client.ObjectKeyFromObject(wl).String()) + return nil + }) + + controller.run() + + if diff := cmp.Diff(tc.wantEvents, updates); diff != "" { + t.Errorf("Unexpected events (-want/+got):\n%s", diff) + } + + if diff := cmp.Diff(tc.wantEvictedWorkloads, evictedWorkloads); diff != "" { + t.Errorf("Unexpected evicted workloads (-want/+got):\n%s", diff) + } + }) + } +} diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index bd3b5033fe..e00c462811 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -190,6 +190,16 @@ func (w 
*WorkloadWrapper) Labels(l map[string]string) *WorkloadWrapper {
	return w
}

// SetOrReplaceAdmissionCheck sets the admission-check condition with the
// given name on the workload's status, replacing any existing condition of
// that type (via apimeta.SetStatusCondition semantics).
func (w *WorkloadWrapper) SetOrReplaceAdmissionCheck(name string, status metav1.ConditionStatus, reason string) *WorkloadWrapper {
	cond := metav1.Condition{
		Type:   name,
		Status: status,
		Reason: reason,
	}
	apimeta.SetStatusCondition(&w.Status.AdmissionChecks, cond)
	return w
}

type PodSetWrapper struct{ kueue.PodSet }

func MakePodSet(name string, count int) *PodSetWrapper {
@@ -563,6 +573,11 @@ func MakeAdmissionCheck(name string) *AdmissionCheckWrapper {
	}
}

// Policy sets the preemption policy of the wrapped admission check.
func (ac *AdmissionCheckWrapper) Policy(p kueue.AdmissionCheckPreemptionPolicy) *AdmissionCheckWrapper {
	ac.Spec.PreemptionPolicy = &p
	return ac
}

// Obj returns the wrapped AdmissionCheck.
func (ac *AdmissionCheckWrapper) Obj() *kueue.AdmissionCheck {
	return &ac.AdmissionCheck
}