From bbdcea910d2c854f7145e2eb752270452dee671a Mon Sep 17 00:00:00 2001
From: Abdullah Gharaibeh
Date: Fri, 25 Feb 2022 15:28:38 -0500
Subject: [PATCH] Add support for affinity matching

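Flavors can now define labels. When assigning flavors, the scheduler
matches a workload's node selector and required node affinity terms
against each flavor's labels, considering only the label keys defined
across the flavors of a resource; all other keys are ignored.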
---
 pkg/capacity/capacity.go                     |  28 ++-
 pkg/capacity/snapshot.go                     |   1 +
 pkg/capacity/snapshot_test.go                |   6 +
 pkg/scheduler/scheduler.go                   |  72 +++++-
 pkg/scheduler/scheduler_test.go              | 221 ++++++++++++++++++-
 pkg/util/testing/wrappers.go                 |   9 +-
 test/integration/scheduler/scheduler_test.go |  37 ++++
 7 files changed, 367 insertions(+), 7 deletions(-)

diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go
index e57745a062..1133883c34 100644
--- a/pkg/capacity/capacity.go
+++ b/pkg/capacity/capacity.go
@@ -23,6 +23,7 @@ import (
 	"sync"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	kueue "sigs.k8s.io/kueue/api/v1alpha1"
@@ -77,6 +78,10 @@ type Capacity struct {
 	RequestableResources map[corev1.ResourceName][]FlavorInfo
 	UsedResources        Resources
 	Workloads            map[string]*workload.Info
+	// The set of label keys across all flavors of a resource.
+	// These keys define the affinity terms of a workload
+	// that can be matched against the flavors.
+	LabelKeys map[corev1.ResourceName]sets.String
 }
 
 // FlavorInfo holds processed flavor type.
@@ -85,6 +90,7 @@ type FlavorInfo struct {
 	Guaranteed int64
 	Ceiling    int64
 	Taints     []corev1.Taint
+	Labels     map[string]string
 }
 
 func NewCapacity(cap *kueue.Capacity) *Capacity {
@@ -95,17 +101,29 @@ func NewCapacity(cap *kueue.Capacity) *Capacity {
 		Workloads:            map[string]*workload.Info{},
 	}
 
+	labelKeys := map[corev1.ResourceName]sets.String{}
 	for _, r := range cap.Spec.RequestableResources {
 		if len(r.Flavors) == 0 {
 			continue
 		}
+		resKeys := sets.NewString()
 		ts := make(map[string]int64, len(r.Flavors))
 		for _, t := range r.Flavors {
+			for k := range t.Labels {
+				resKeys.Insert(k)
+			}
 			ts[t.Name] = 0
 		}
+		if len(resKeys) != 0 {
+			labelKeys[r.Name] = resKeys
+		}
 		c.UsedResources[r.Name] = ts
 	}
+
+	if len(labelKeys) != 0 {
+		c.LabelKeys = labelKeys
+	}
 	return c
 }
@@ -351,12 +369,20 @@ func resourcesByName(in []kueue.Resource) map[corev1.ResourceName][]FlavorInfo {
 		flavors := make([]FlavorInfo, len(r.Flavors))
 		for i := range flavors {
 			f := &r.Flavors[i]
-			flavors[i] = FlavorInfo{
+			fInfo := FlavorInfo{
 				Name:       f.Name,
 				Guaranteed: workload.ResourceValue(r.Name, f.Quota.Guaranteed),
 				Ceiling:    workload.ResourceValue(r.Name, f.Quota.Ceiling),
 				Taints:     append([]corev1.Taint(nil), f.Taints...),
 			}
+			if len(f.Labels) != 0 {
+				fInfo.Labels = make(map[string]string, len(f.Labels))
+				for k, v := range f.Labels {
+					fInfo.Labels[k] = v
+				}
+			}
+			flavors[i] = fInfo
 		}
 		out[r.Name] = flavors
 	}
diff --git a/pkg/capacity/snapshot.go b/pkg/capacity/snapshot.go
index 33a0cd4386..a5be026be2 100644
--- a/pkg/capacity/snapshot.go
+++ b/pkg/capacity/snapshot.go
@@ -54,6 +54,7 @@ func (c *Capacity) snapshot() *Capacity {
 		RequestableResources: c.RequestableResources, // Shallow copy is enough.
 		UsedResources:        make(Resources, len(c.UsedResources)),
 		Workloads:            make(map[string]*workload.Info, len(c.Workloads)),
+		LabelKeys:            c.LabelKeys, // Shallow copy is enough.
 	}
 	for res, flavors := range c.UsedResources {
 		flavorsCopy := make(map[string]int64, len(flavors))
diff --git a/pkg/capacity/snapshot_test.go b/pkg/capacity/snapshot_test.go
index 063bd6d1b2..8c3a9481a2 100644
--- a/pkg/capacity/snapshot_test.go
+++ b/pkg/capacity/snapshot_test.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	kueue "sigs.k8s.io/kueue/api/v1alpha1"
@@ -55,12 +56,14 @@ func TestSnapshot(t *testing.T) {
 							Quota: kueue.Quota{
 								Guaranteed: resource.MustParse("100"),
 							},
+							Labels: map[string]string{"foo": "bar", "instance": "on-demand"},
 						},
 						{
 							Name: "spot",
 							Quota: kueue.Quota{
 								Guaranteed: resource.MustParse("200"),
 							},
+							Labels: map[string]string{"baz": "bar", "instance": "spot"},
 						},
 					},
 				},
@@ -221,10 +224,12 @@ func TestSnapshot(t *testing.T) {
 					{
 						Name:       "demand",
 						Guaranteed: 100_000,
+						Labels:     map[string]string{"foo": "bar", "instance": "on-demand"},
 					},
 					{
 						Name:       "spot",
 						Guaranteed: 200_000,
+						Labels:     map[string]string{"baz": "bar", "instance": "spot"},
 					},
 				},
 			},
@@ -237,6 +242,7 @@ func TestSnapshot(t *testing.T) {
 			Workloads: map[string]*workload.Info{
 				"/alpha": workload.NewInfo(&workloads[0]),
 			},
+			LabelKeys: map[corev1.ResourceName]sets.String{corev1.ResourceCPU: {"baz": {}, "foo": {}, "instance": {}}},
 		},
 		"foobar": {
 			Name: "foobar",
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 44042ed6c0..2da49d932a 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -24,10 +24,12 @@ import (
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
 	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
+	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -146,7 +148,7 @@ func calculateRequirementsForAssignments(log logr.Logger, workloads []workload.I
 			continue
 		}
 		e := entry{Info: w}
-		if !e.assignFlavors(c) {
+		if !e.assignFlavors(log, c) {
 			log.V(2).Info("Workload didn't fit in remaining capacity even when borrowing")
 			continue
 		}
@@ -160,14 +162,14 @@ func calculateRequirementsForAssignments(log logr.Logger, workloads []workload.I
 // borrow from the cohort.
 // It returns whether the entry would fit. If it doesn't fit, the object is
 // unmodified.
-func (e *entry) assignFlavors(cap *capacity.Capacity) bool {
+func (e *entry) assignFlavors(log logr.Logger, cap *capacity.Capacity) bool {
 	flavoredRequests := make([]workload.PodSetResources, 0, len(e.TotalRequests))
 	wUsed := make(capacity.Resources)
 	wBorrows := make(capacity.Resources)
 	for i, podSet := range e.TotalRequests {
 		flavors := make(map[corev1.ResourceName]string, len(podSet.Requests))
 		for resName, reqVal := range podSet.Requests {
-			rFlavor, borrow := findFlavorForResource(resName, reqVal, wUsed[resName], cap, &e.Obj.Spec.Pods[i].Spec)
+			rFlavor, borrow := findFlavorForResource(log, resName, reqVal, wUsed[resName], cap, &e.Obj.Spec.Pods[i].Spec)
 			if rFlavor == "" {
 				return false
 			}
@@ -239,7 +241,15 @@ func (s *Scheduler) assign(ctx context.Context, e *entry) error {
 
 // findFlavorForResource returns a flavor which can satisfy the resource request,
 // given that wUsed is the usage of flavors by previous podsets.
 // If it finds a flavor, also returns any borrowing required.
-func findFlavorForResource(name corev1.ResourceName, val int64, wUsed map[string]int64, cap *capacity.Capacity, spec *corev1.PodSpec) (string, int64) {
+func findFlavorForResource(
+	log logr.Logger,
+	name corev1.ResourceName,
+	val int64,
+	wUsed map[string]int64,
+	cap *capacity.Capacity,
+	spec *corev1.PodSpec) (string, int64) {
+	// We will only check against the flavors' labels for the resource.
+	selector := flavorSelector(spec, cap.LabelKeys[name])
 	for _, flavor := range cap.RequestableResources[name] {
 		_, untolerated := corev1helpers.FindMatchingUntoleratedTaint(flavor.Taints, spec.Tolerations, func(t *corev1.Taint) bool {
 			return t.Effect == corev1.TaintEffectNoSchedule || t.Effect == corev1.TaintEffectNoExecute
@@ -247,6 +257,14 @@ func findFlavorForResource(name corev1.ResourceName, val int64, wUsed map[string
 		if untolerated {
 			continue
 		}
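+		// Evaluate the pruned affinity terms against the flavor's labels,
+		// treating the flavor as if it were a node carrying those labels.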
+		if match, err := selector.Match(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Labels: flavor.Labels}}); !match || err != nil {
+			if err != nil {
+				log.Error(err, "Matching workload affinity against flavor; no flavor assigned")
+				return "", 0
+			}
+			continue
+		}
+
 		// Consider the usage assigned to previous pod sets.
 		ok, borrow := fitsFlavorLimits(name, val+wUsed[flavor.Name], cap, &flavor)
 		if ok {
@@ -256,6 +274,52 @@ func findFlavorForResource(name corev1.ResourceName, val int64, wUsed map[string
 	return "", 0
 }
 
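+// flavorSelector returns a node-affinity matcher built from the workload's
+// node selector and required node affinity terms, keeping only the
+// constraints whose keys appear in allowedKeys.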
+func flavorSelector(spec *corev1.PodSpec, allowedKeys sets.String) nodeaffinity.RequiredNodeAffinity {
+	// This function generally replicates the implementation of kube-scheduler's
+	// NodeAffinity Filter plugin as of v1.24.
+	var specCopy corev1.PodSpec
+
+	// Remove affinity constraints with irrelevant keys.
+	if len(spec.NodeSelector) != 0 {
+		specCopy.NodeSelector = map[string]string{}
+		for k, v := range spec.NodeSelector {
+			if allowedKeys.Has(k) {
+				specCopy.NodeSelector[k] = v
+			}
+		}
+	}
+
+	affinity := spec.Affinity
+	if affinity != nil && affinity.NodeAffinity != nil && affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		var termsCopy []corev1.NodeSelectorTerm
+		for _, t := range affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
+			var expCopy []corev1.NodeSelectorRequirement
+			for _, e := range t.MatchExpressions {
+				if allowedKeys.Has(e.Key) {
+					expCopy = append(expCopy, e)
+				}
+			}
+			// If a term becomes empty, node affinity matches any flavor: the terms
+			// are ORed, so matching gets reduced to spec.NodeSelector alone.
+			if len(expCopy) == 0 {
+				termsCopy = nil
+				break
+			}
+			termsCopy = append(termsCopy, corev1.NodeSelectorTerm{MatchExpressions: expCopy})
+		}
+		if len(termsCopy) != 0 {
+			specCopy.Affinity = &corev1.Affinity{
+				NodeAffinity: &corev1.NodeAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+						NodeSelectorTerms: termsCopy,
+					},
+				},
+			}
+		}
+	}
+	return nodeaffinity.GetRequiredNodeAffinity(&corev1.Pod{Spec: specCopy})
+}
+
 // fitsFlavorLimits returns whether a requested resource fits in a specific flavor's quota limits.
 // If it fits, also returns any borrowing required.
 func fitsFlavorLimits(name corev1.ResourceName, val int64, cap *capacity.Capacity, flavor *capacity.FlavorInfo) (bool, int64) {
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index de9451cd49..09cf0d9e7e 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -875,6 +875,222 @@ func TestEntryAssignFlavors(t *testing.T) {
 				},
 			},
 		},
+		"multiple flavors, fits a node selector": {
+			wlPods: []kueue.PodSet{
+				{
+					Count: 1,
+					Name:  "main",
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Resources: corev1.ResourceRequirements{
+									Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")},
+								},
+							},
+						},
+						// ignored1:foo should get ignored
+						NodeSelector: map[string]string{"cpuType": "two", "ignored1": "foo"},
+						Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+								NodeSelectorTerms: []corev1.NodeSelectorTerm{
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												// this expression should get ignored
+												Key:      "ignored2",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"bar"},
+											},
+										},
+									},
+								},
+							}},
+						},
+					},
+				},
+			},
+			capacity: capacity.Capacity{
+				RequestableResources: map[corev1.ResourceName][]capacity.FlavorInfo{
+					corev1.ResourceCPU: noBorrowing([]capacity.FlavorInfo{
+						{Name: "one", Guaranteed: 4000, Labels: map[string]string{"cpuType": "one"}},
+						{Name: "two", Guaranteed: 4000, Labels: map[string]string{"cpuType": "two"}},
+					}),
+				},
+				LabelKeys: map[corev1.ResourceName]sets.String{corev1.ResourceCPU: sets.NewString("cpuType")},
+			},
+			wantFits: true,
+			wantFlavors: map[string]map[corev1.ResourceName]string{
+				"main": {
+					corev1.ResourceCPU: "two",
+				},
+			},
+		},
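+		// In the cases below, only the keys listed in LabelKeys participate in
+		// matching; selector keys like "ignored1" and "ignored2" are pruned away.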
+		"multiple flavors, fits with node affinity": {
+			wlPods: []kueue.PodSet{
+				{
+					Count: 1,
+					Name:  "main",
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Resources: corev1.ResourceRequirements{
+									Requests: corev1.ResourceList{
+										corev1.ResourceCPU:    resource.MustParse("1"),
+										corev1.ResourceMemory: resource.MustParse("1Mi"),
+									},
+								},
+							},
+						},
+						NodeSelector: map[string]string{"ignored1": "foo"},
+						Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+								NodeSelectorTerms: []corev1.NodeSelectorTerm{
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												Key:      "cpuType",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"two"},
+											},
+											{
+												Key:      "memType",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"two"},
+											},
+										},
+									},
+								},
+							}},
+						},
+					},
+				},
+			},
+			capacity: capacity.Capacity{
+				RequestableResources: map[corev1.ResourceName][]capacity.FlavorInfo{
+					corev1.ResourceCPU: noBorrowing([]capacity.FlavorInfo{
+						{Name: "one", Guaranteed: 4000, Labels: map[string]string{"cpuType": "one", "group": "group1"}},
+						{Name: "two", Guaranteed: 4000, Labels: map[string]string{"cpuType": "two"}},
+					}),
+					corev1.ResourceMemory: noBorrowing([]capacity.FlavorInfo{
+						{Name: "one", Guaranteed: utiltesting.Gi, Labels: map[string]string{"memType": "one"}},
+						{Name: "two", Guaranteed: utiltesting.Gi, Labels: map[string]string{"memType": "two"}},
+					}),
+				},
+				LabelKeys: map[corev1.ResourceName]sets.String{
+					corev1.ResourceCPU:    sets.NewString("cpuType", "group"),
+					corev1.ResourceMemory: sets.NewString("memType"),
+				},
+			},
+			wantFits: true,
+			wantFlavors: map[string]map[corev1.ResourceName]string{
+				"main": {
+					corev1.ResourceCPU:    "two",
+					corev1.ResourceMemory: "two",
+				},
+			},
+		},
+		"multiple flavors, node affinity fits any flavor": {
+			wlPods: []kueue.PodSet{
+				{
+					Count: 1,
+					Name:  "main",
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Resources: corev1.ResourceRequirements{
+									Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")},
+								},
+							},
+						},
+						Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+								NodeSelectorTerms: []corev1.NodeSelectorTerm{
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												Key:      "ignored2",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"bar"},
+											},
+										},
+									},
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												// Although this term selects "two",
+												// the first term practically matches
+												// any flavor; and since the terms
+												// are ORed, any flavor can be selected.
+												Key:      "cpuType",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"two"},
+											},
+										},
+									},
+								},
+							}},
+						},
+					},
+				},
+			},
+			capacity: capacity.Capacity{
+				RequestableResources: map[corev1.ResourceName][]capacity.FlavorInfo{
+					corev1.ResourceCPU: noBorrowing([]capacity.FlavorInfo{
+						{Name: "one", Guaranteed: 4000, Labels: map[string]string{"cpuType": "one"}},
+						{Name: "two", Guaranteed: 4000, Labels: map[string]string{"cpuType": "two"}},
+					}),
+				},
+				LabelKeys: map[corev1.ResourceName]sets.String{corev1.ResourceCPU: sets.NewString("cpuType")},
+			},
+			wantFits: true,
+			wantFlavors: map[string]map[corev1.ResourceName]string{
+				"main": {
+					corev1.ResourceCPU: "one",
+				},
+			},
+		},
+		"multiple flavors, doesn't fit node affinity": {
+			wlPods: []kueue.PodSet{
+				{
+					Count: 1,
+					Name:  "main",
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Resources: corev1.ResourceRequirements{
+									Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")},
+								},
+							},
+						},
+						Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+								NodeSelectorTerms: []corev1.NodeSelectorTerm{
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												Key:      "cpuType",
+												Operator: corev1.NodeSelectorOpIn,
+												Values:   []string{"three"},
+											},
+										},
+									},
+								},
+							}},
+						},
+					},
+				},
+			},
+			capacity: capacity.Capacity{
+				RequestableResources: map[corev1.ResourceName][]capacity.FlavorInfo{
+					corev1.ResourceCPU: noBorrowing([]capacity.FlavorInfo{
+						{Name: "one", Guaranteed: 4000, Labels: map[string]string{"cpuType": "one"}},
+						{Name: "two", Guaranteed: 4000, Labels: map[string]string{"cpuType": "two"}},
+					}),
+				},
+				LabelKeys: map[corev1.ResourceName]sets.String{corev1.ResourceCPU: sets.NewString("cpuType")},
+			},
+			wantFits: false,
+		},
 		"multiple specs, fit different flavors": {
 			wlPods: []kueue.PodSet{
 				{
@@ -1043,6 +1259,9 @@ func TestEntryAssignFlavors(t *testing.T) {
 	}
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
+			log := logrtesting.NewTestLoggerWithOptions(t, logrtesting.Options{
+				Verbosity: 2,
+			})
 			e := entry{
 				Info: *workload.NewInfo(&kueue.QueuedWorkload{
 					Spec: kueue.QueuedWorkloadSpec{
@@ -1050,7 +1269,7 @@ func TestEntryAssignFlavors(t *testing.T) {
 				},
 			}),
 			}
-			fits := e.assignFlavors(&tc.capacity)
+			fits := e.assignFlavors(log, &tc.capacity)
 			if fits != tc.wantFits {
 				t.Errorf("e.assignFlavors(_)=%t, want %t", fits, tc.wantFits)
 			}
diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go
index e1fcda9b60..f714f2e5f7 100644
--- a/pkg/util/testing/wrappers.go
+++ b/pkg/util/testing/wrappers.go
@@ -52,6 +52,7 @@ func MakeJob(name, ns string) *JobWrapper {
 						Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{}},
 					},
 				},
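+				// Initialized so that JobWrapper.NodeSelector can add entries
+				// without checking for a nil map.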
+				NodeSelector: map[string]string{},
 			},
 		},
 	},
@@ -81,12 +82,18 @@ func (j *JobWrapper) Queue(queue string) *JobWrapper {
 	return j
 }
 
-// AddToleration adds a toleration to the job.
+// Toleration adds a toleration to the job.
 func (j *JobWrapper) Toleration(t corev1.Toleration) *JobWrapper {
 	j.Spec.Template.Spec.Tolerations = append(j.Spec.Template.Spec.Tolerations, t)
 	return j
 }
 
+// NodeSelector adds a node selector to the job.
+func (j *JobWrapper) NodeSelector(k, v string) *JobWrapper {
+	j.Spec.Template.Spec.NodeSelector[k] = v
+	return j
+}
+
 // AddResource adds a resource request to the default container.
 func (j *JobWrapper) AddResource(r corev1.ResourceName, v string) *JobWrapper {
 	j.Spec.Template.Spec.Containers[0].Resources.Requests[r] = resource.MustParse(v)
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 650ba9f860..0759ba219c 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -199,4 +199,41 @@ var _ = ginkgo.Describe("Scheduler", func() {
 		}, timeout, interval).Should(gomega.BeTrue())
 		gomega.Expect(createdJob.Spec.Template.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(onDemandFlavor))
 	})
+
+	ginkgo.It("Should schedule jobs with affinity to specific flavor", func() {
+		ginkgo.By("creating capacity and queue")
+		capacity := testing.MakeCapacity("affinity-capacity").
+			Resource(testing.MakeResource(corev1.ResourceCPU).
+				Flavor(testing.MakeFlavor(spotFlavor, "5").Label(instanceKey, spotFlavor).Obj()).
+				Flavor(testing.MakeFlavor(onDemandFlavor, "5").Label(instanceKey, onDemandFlavor).Obj()).
+				Obj()).
+			Obj()
+		gomega.Expect(k8sClient.Create(ctx, capacity)).Should(gomega.Succeed())
+		queue := testing.MakeQueue("affinity-queue", namespace).Capacity(capacity.Name).Obj()
+		gomega.Expect(k8sClient.Create(ctx, queue)).Should(gomega.Succeed())
+
+		ginkgo.By("checking a job without affinity starts on the first flavor")
+		job1 := testing.MakeJob("no-affinity-job", namespace).Queue(queue.Name).AddResource(corev1.ResourceCPU, "1").Obj()
+		gomega.Expect(k8sClient.Create(ctx, job1)).Should(gomega.Succeed())
+		createdJob1 := &batchv1.Job{}
+		gomega.Eventually(func() bool {
+			lookupKey := types.NamespacedName{Name: job1.Name, Namespace: job1.Namespace}
+			return k8sClient.Get(ctx, lookupKey, createdJob1) == nil && !*createdJob1.Spec.Suspend
+		}, timeout, interval).Should(gomega.BeTrue())
+		gomega.Expect(createdJob1.Spec.Template.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(spotFlavor))
+
+		ginkgo.By("checking a second job with affinity to on-demand")
+		job2 := testing.MakeJob("affinity-job", namespace).Queue(queue.Name).
+			NodeSelector(instanceKey, onDemandFlavor).
+			NodeSelector("foo", "bar").
+			AddResource(corev1.ResourceCPU, "1").Obj()
+		gomega.Expect(k8sClient.Create(ctx, job2)).Should(gomega.Succeed())
+		createdJob2 := &batchv1.Job{}
+		gomega.Eventually(func() bool {
+			lookupKey := types.NamespacedName{Name: job2.Name, Namespace: job2.Namespace}
+			return k8sClient.Get(ctx, lookupKey, createdJob2) == nil && !*createdJob2.Spec.Suspend
+		}, timeout, interval).Should(gomega.BeTrue())
+		gomega.Expect(len(createdJob2.Spec.Template.Spec.NodeSelector)).Should(gomega.Equal(2))
+		gomega.Expect(createdJob2.Spec.Template.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(onDemandFlavor))
+	})
 })