From ac910b1cd5e2b5df5bdcd976d52d640f79bf8b84 Mon Sep 17 00:00:00 2001 From: Abdullah Gharaibeh Date: Mon, 11 Apr 2022 14:47:19 -0400 Subject: [PATCH] Renamed requestableResources to resources in clusterQueue --- apis/core/v1alpha1/clusterqueue_types.go | 10 ++-- apis/core/v1alpha1/zz_generated.deepcopy.go | 4 +- .../bases/kueue.x-k8s.io_clusterqueues.yaml | 46 +++++++++---------- config/samples/single-clusterqueue-setup.yaml | 6 +-- docs/concepts/cluster_queue.md | 14 +++--- docs/tasks/administer_cluster_quotas.md | 20 ++++---- pkg/cache/cache.go | 6 +-- pkg/cache/cache_test.go | 12 ++--- pkg/cache/snapshot_test.go | 6 +-- pkg/scheduler/scheduler_test.go | 6 +-- pkg/util/testing/wrappers.go | 6 +-- test/integration/scheduler/scheduler_test.go | 2 +- 12 files changed, 69 insertions(+), 69 deletions(-) diff --git a/apis/core/v1alpha1/clusterqueue_types.go b/apis/core/v1alpha1/clusterqueue_types.go index 1bb3bbdd24..6c441126b7 100644 --- a/apis/core/v1alpha1/clusterqueue_types.go +++ b/apis/core/v1alpha1/clusterqueue_types.go @@ -24,7 +24,7 @@ import ( // ClusterQueueSpec defines the desired state of ClusterQueue type ClusterQueueSpec struct { - // requestableResources represent the total pod requests of workloads dispatched + // resources represent the total pod requests of workloads dispatched // via this clusterQueue. This doesn’t guarantee the actual availability of // resources, although an integration with a resource provisioner like Cluster // Autoscaler is possible to achieve that. Example: @@ -40,7 +40,7 @@ type ClusterQueueSpec struct { // // +listType=map // +listMapKey=name - RequestableResources []Resource `json:"requestableResources,omitempty"` + Resources []Resource `json:"resources,omitempty"` // cohort that this ClusterQueue belongs to. QCs that belong to the // same cohort can borrow unused resources from each other. @@ -69,7 +69,7 @@ type ClusterQueueSpec struct { // name: tenantA // spec: // cohort: borrowing-cohort - // requestableResources: + // resources: // - name: cpu // - name: spot // quota: @@ -95,7 +95,7 @@ type ClusterQueueSpec struct { // name: tenantB // spec: // cohort: borrowing-cohort - // requestableResources: + // resources: // - name: cpu // - name: on-demand // quota: @@ -166,7 +166,7 @@ type Resource struct { // flavor and must set different values of a shared key. For example: // // spec: - // requestableResources: + // resources: // - name: nvidia.com/gpus // - name: k80 // quota: diff --git a/apis/core/v1alpha1/zz_generated.deepcopy.go b/apis/core/v1alpha1/zz_generated.deepcopy.go index 0b7bd03720..1f9767e829 100644 --- a/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -111,8 +111,8 @@ func (in *ClusterQueueList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterQueueSpec) DeepCopyInto(out *ClusterQueueSpec) { *out = *in - if in.RequestableResources != nil { - in, out := &in.RequestableResources, &out.RequestableResources + if in.Resources != nil { + in, out := &in.Resources, &out.Resources *out = make([]Resource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) diff --git a/config/crd/bases/kueue.x-k8s.io_clusterqueues.yaml b/config/crd/bases/kueue.x-k8s.io_clusterqueues.yaml index 4c0b99191f..cab23f545c 100644 --- a/config/crd/bases/kueue.x-k8s.io_clusterqueues.yaml +++ b/config/crd/bases/kueue.x-k8s.io_clusterqueues.yaml @@ -75,18 +75,18 @@ spec: will only be eligible to consume on-demand cores (the next in the list of cpu flavors). 5. Before considering on-demand, the workload will get assigned spot if the quota can be borrowed from the cohort. - \n metadata: name: tenantA spec: cohort: borrowing-cohort requestableResources: + \n metadata: name: tenantA spec: cohort: borrowing-cohort resources: - name: cpu - name: spot quota: min: 1000 - name: on-demand quota: min: 100 - name: nvidia.com/gpus - name: k80 quota: min: 10 max: 20 labels: - cloud.provider.com/accelerator: nvidia-tesla-k80 - name: p100 quota: min: 10 max: 20 labels: - cloud.provider.com/accelerator: nvidia-tesla-p100 \n metadata: name: tenantB spec: cohort: borrowing-cohort - requestableResources: - name: cpu - name: on-demand quota: min: - 100 - name: nvidia.com/gpus - name: k80 quota: min: 10 max: 20 labels: - - cloud.provider.com/accelerator: nvidia-tesla-k80 \n If empty, - this ClusterQueue cannot borrow from any other ClusterQueue and - vice versa. \n The name style is similar to label keys. These are - just names to link QCs together, and they are meaningless otherwise." + resources: - name: cpu - name: on-demand quota: min: 100 - name: + nvidia.com/gpus - name: k80 quota: min: 10 max: 20 labels: - cloud.provider.com/accelerator: + nvidia-tesla-k80 \n If empty, this ClusterQueue cannot borrow from + any other ClusterQueue and vice versa. \n The name style is similar + to label keys. These are just names to link QCs together, and they + are meaningless otherwise." type: string namespaceSelector: description: namespaceSelector defines which namespaces are allowed @@ -151,13 +151,13 @@ spec: - StrictFIFO - BestEffortFIFO type: string - requestableResources: - description: "requestableResources represent the total pod requests - of workloads dispatched via this clusterQueue. This doesn’t guarantee - the actual availability of resources, although an integration with - a resource provisioner like Cluster Autoscaler is possible to achieve - that. Example: \n - name: cpu flavors: - quota: min: 100 - name: - memory flavors: - quota: min: 100Gi" + resources: + description: "resources represent the total pod requests of workloads + dispatched via this clusterQueue. This doesn’t guarantee the actual + availability of resources, although an integration with a resource + provisioner like Cluster Autoscaler is possible to achieve that. + Example: \n - name: cpu flavors: - quota: min: 100 - name: memory + flavors: - quota: min: 100Gi" items: properties: flavors: @@ -169,15 +169,15 @@ spec: \n For example, if the resource is nvidia.com/gpu, and we want to define different limits for different gpu models, then each model is mapped to a flavor and must set different - values of a shared key. 
For example: \n spec: requestableResources: - - name: nvidia.com/gpus - name: k80 quota: min: 10 - name: - p100 quota: min: 10 \n The flavors are evaluated in order, - selecting the first to satisfy a workload’s requirements. - Also the quantities are additive, in the example above the - GPU quota in total is 20 (10 k80 + 10 p100). A workload is - limited to the selected type by converting the labels to a - node selector that gets injected into the workload. This list - can’t be empty, at least one flavor must exist." + values of a shared key. For example: \n spec: resources: - + name: nvidia.com/gpus - name: k80 quota: min: 10 - name: p100 + quota: min: 10 \n The flavors are evaluated in order, selecting + the first to satisfy a workload’s requirements. Also the quantities + are additive, in the example above the GPU quota in total + is 20 (10 k80 + 10 p100). A workload is limited to the selected + type by converting the labels to a node selector that gets + injected into the workload. This list can’t be empty, at least + one flavor must exist." items: properties: name: diff --git a/config/samples/single-clusterqueue-setup.yaml b/config/samples/single-clusterqueue-setup.yaml index 7b11c89888..ae4f1e709f 100644 --- a/config/samples/single-clusterqueue-setup.yaml +++ b/config/samples/single-clusterqueue-setup.yaml @@ -9,15 +9,15 @@ metadata: name: cluster-total spec: namespaceSelector: {} - requestableResources: + resources: - name: "cpu" flavors: - - resourceFlavor: default + - name: default quota: min: 9 - name: "memory" flavors: - - resourceFlavor: default + - name: default quota: min: 36Gi --- diff --git a/docs/concepts/cluster_queue.md b/docs/concepts/cluster_queue.md index 843988f909..edd0b22c34 100644 --- a/docs/concepts/cluster_queue.md +++ b/docs/concepts/cluster_queue.md @@ -16,7 +16,7 @@ metadata: name: cluster-total spec: namespaceSelector: {} - requestableResources: + resources: - name: "cpu" flavors: - name: default @@ -103,10 +103,10 @@ taints: ``` You can use the `.metadata.name` to reference a flavor from a ClusterQueue in -the `.spec.requestableResources[*].flavors[*].name` field. +the `.spec.resources[*].flavors[*].name` field. For each resource of each [pod set](workload.md#pod-sets) in a Workload, Kueue -assigns the first flavor in the `.spec.requestableResources[*].resources.flavors` +assigns the first flavor in the `.spec.resources[*].flavors` list that has enough unused quota in the ClusterQueue or the ClusterQueue's [cohort](#cohort). @@ -176,7 +176,7 @@ ClusterQueue. When borrowing, Kueue satisfies the following semantics: - When assigning flavors, Kueue goes through the list of flavors in - `.spec.requestableResources[*].flavors`. For each flavor, Kueue attempts to + `.spec.resources[*].flavors`. For each flavor, Kueue attempts to fit the workload using the min quota of the ClusterQueue or the unused min quota of other ClusterQueues in the cohort, up to the max quota of the ClusterQueue. If the workload doesn't fit, Kueue proceeds evaluating the next @@ -196,7 +196,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: default @@ -217,7 +217,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: default @@ -247,7 +247,7 @@ No admitted workloads will be stopped to make space for new workloads. 
 ### Max quotas
 
 To limit the amount of resources that a ClusterQueue can borrow from others,
-you can set the `.spec.requestableResources[*].flavors[*].quota.max`
+you can set the `.spec.resources[*].flavors[*].quota.max`
 [quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/)
 field.
 `max` must be greater than or equal to `min`.
diff --git a/docs/tasks/administer_cluster_quotas.md b/docs/tasks/administer_cluster_quotas.md
index d659ebd713..afdfef433c 100644
--- a/docs/tasks/administer_cluster_quotas.md
+++ b/docs/tasks/administer_cluster_quotas.md
@@ -40,7 +40,7 @@ metadata:
   name: cluster-total
 spec:
   namespaceSelector: {} # match all.
-  requestableResources:
+  resources:
   - name: "cpu"
     flavors:
     - name: default
@@ -90,7 +90,7 @@ To create the ResourceFlavor, run the following command:
 kubectl apply -f default-flavor.yaml
 ```
 
-The `.metadata.name` matches the `.spec.requestableResources[*].flavors[0].resourceFlavor`
+The `.metadata.name` matches the `.spec.resources[*].flavors[0].name`
 field in the ClusterQueue.
 
 ### 3. Create [Queues](/docs/concepts/queue.md)
@@ -129,7 +129,7 @@ architectures, namely `x86` and `arm`, specified in the node label `cpu-arch`.
 
 **Limitations**
 
-- Using the same flavors in multiple `.requestableResources` of a ClusterQueue
+- Using the same flavors in multiple `.resources` of a ClusterQueue
   is [not supported](https://github.com/kubernetes-sigs/kueue/issues/167).
 
 ### 1. Create ResourceFlavors
@@ -181,7 +181,7 @@ metadata:
   name: cluster-total
 spec:
   namespaceSelector: {}
-  requestableResources:
+  resources:
   - name: "cpu"
     flavors:
     - name: x86
@@ -197,7 +197,7 @@ spec:
         min: 84Gi
 ```
 
-The flavor names in the fields `.spec.requestableResources[*].flavors[*].resourceFlavor`
+The flavor names in the fields `.spec.resources[*].flavors[*].name`
 should match the names of the ResourceFlavors created earlier.
Note that `memory` is referencing the `default` flavor created in the [single flavor setup](#single-clusterqueue-and-single-resourceflavor-setup) @@ -227,7 +227,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: default @@ -251,7 +251,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: default @@ -295,7 +295,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: arm @@ -321,7 +321,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: arm @@ -347,7 +347,7 @@ metadata: spec: namespaceSelector: {} cohort: team-ab - requestableResources: + resources: - name: "cpu" flavors: - name: x86 diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 9e0990a5c3..669cb14667 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -115,15 +115,15 @@ func (c *Cache) newClusterQueue(cq *kueue.ClusterQueue) (*ClusterQueue, error) { } func (c *ClusterQueue) update(in *kueue.ClusterQueue, resourceFlavors map[string]*kueue.ResourceFlavor) error { - c.RequestableResources = resourceLimitsByName(in.Spec.RequestableResources) + c.RequestableResources = resourceLimitsByName(in.Spec.Resources) nsSelector, err := metav1.LabelSelectorAsSelector(in.Spec.NamespaceSelector) if err != nil { return err } c.NamespaceSelector = nsSelector - usedResources := make(Resources, len(in.Spec.RequestableResources)) - for _, r := range in.Spec.RequestableResources { + usedResources := make(Resources, len(in.Spec.Resources)) + for _, r := range in.Spec.Resources { if len(r.Flavors) == 0 { continue } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index 4f0615256b..e2d86ce01a 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -42,7 +42,7 @@ func TestCacheClusterQueueOperations(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "a"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{{ @@ -60,7 +60,7 @@ func TestCacheClusterQueueOperations(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "b"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{{ @@ -203,7 +203,7 @@ func TestCacheClusterQueueOperations(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "a"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ @@ -336,7 +336,7 @@ func TestCacheWorkloadOperations(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "one"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: "cpu", Flavors: []kueue.Flavor{ @@ -350,7 +350,7 @@ func TestCacheWorkloadOperations(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "two"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: "cpu", Flavors: []kueue.Flavor{ @@ -796,7 +796,7 @@ func TestClusterQueueUsage(t *testing.T) { cq := kueue.ClusterQueue{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: 
[]kueue.Flavor{ diff --git a/pkg/cache/snapshot_test.go b/pkg/cache/snapshot_test.go index 6f230fd678..e06503c962 100644 --- a/pkg/cache/snapshot_test.go +++ b/pkg/cache/snapshot_test.go @@ -48,7 +48,7 @@ func TestSnapshot(t *testing.T) { }, Spec: kueue.ClusterQueueSpec{ Cohort: "foo", - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ @@ -75,7 +75,7 @@ func TestSnapshot(t *testing.T) { }, Spec: kueue.ClusterQueueSpec{ Cohort: "foo", - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ @@ -106,7 +106,7 @@ func TestSnapshot(t *testing.T) { Name: "bar", }, Spec: kueue.ClusterQueueSpec{ - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 3791afecef..0442bdcf17 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -70,7 +70,7 @@ func TestSchedule(t *testing.T) { }, }, QueueingStrategy: kueue.StrictFIFO, - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ @@ -100,7 +100,7 @@ func TestSchedule(t *testing.T) { }, }, QueueingStrategy: kueue.StrictFIFO, - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ @@ -137,7 +137,7 @@ func TestSchedule(t *testing.T) { }, }, QueueingStrategy: kueue.StrictFIFO, - RequestableResources: []kueue.Resource{ + Resources: []kueue.Resource{ { Name: corev1.ResourceCPU, Flavors: []kueue.Flavor{ diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index 7ecf6a0390..c392897346 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -269,7 +269,7 @@ func (c *ClusterQueueWrapper) Cohort(cohort string) *ClusterQueueWrapper { // Resource adds a resource with flavors. func (c *ClusterQueueWrapper) Resource(r *kueue.Resource) *ClusterQueueWrapper { - c.Spec.RequestableResources = append(c.Spec.RequestableResources, *r) + c.Spec.Resources = append(c.Spec.Resources, *r) return c } @@ -285,10 +285,10 @@ func (c *ClusterQueueWrapper) NamespaceSelector(s *metav1.LabelSelector) *Cluste return c } -// ResourceWrapper wraps a requestable resource. +// ResourceWrapper wraps a resource. type ResourceWrapper struct{ kueue.Resource } -// MakeResource creates a wrapper for a requestable resource. +// MakeResource creates a wrapper for a resource. func MakeResource(name corev1.ResourceName) *ResourceWrapper { return &ResourceWrapper{kueue.Resource{ Name: name, diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 003d0a57a8..5f10290957 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -502,7 +502,7 @@ var _ = ginkgo.Describe("Scheduler", func() { gomega.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: devBEClusterQ.Name}, devCq)).Should(gomega.Succeed()) updatedResource := testing.MakeResource(corev1.ResourceCPU).Flavor(testing.MakeFlavor(onDemandFlavor.Name, "13").Max("13").Obj()).Obj() - devCq.Spec.RequestableResources = []kueue.Resource{*updatedResource} + devCq.Spec.Resources = []kueue.Resource{*updatedResource} gomega.Expect(k8sClient.Update(ctx, devCq)).Should(gomega.Succeed()) gomega.Eventually(func() *bool {
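
For quick reference while reviewing, the fully assembled sample manifest after this patch reads as follows. This is a sketch reconstructed from the config/samples/single-clusterqueue-setup.yaml hunks above; the `apiVersion` and `kind` lines are assumed from the v1alpha1 API group this patch touches, since they fall outside the diff context:

```yaml
# Sketch of config/samples/single-clusterqueue-setup.yaml after this patch.
# The apiVersion/kind header is assumed; it is not visible in the hunks above.
apiVersion: kueue.x-k8s.io/v1alpha1
kind: ClusterQueue
metadata:
  name: cluster-total
spec:
  namespaceSelector: {}   # empty selector: matches all namespaces
  resources:              # renamed from requestableResources by this patch
  - name: "cpu"
    flavors:
    - name: default       # flavor key renamed from resourceFlavor in the sample
      quota:
        min: 9
  - name: "memory"
    flavors:
    - name: default
      quota:
        min: 36Gi
```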