Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feature] Support Kubeflow MPIJob in MultiKueue #2880

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 14 additions & 3 deletions Makefile-deps.mk
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,19 @@ mpi-operator-crd: ## Copy the CRDs from the mpi-operator to the dep-crds directo
KF_TRAINING_ROOT = $(shell $(GO_CMD) list -m -mod=readonly -f "{{.Dir}}" github.com/kubeflow/training-operator)
.PHONY: kf-training-operator-crd
kf-training-operator-crd: ## Copy the CRDs from the training-operator to the dep-crds directory.
mkdir -p $(EXTERNAL_CRDS_DIR)/training-operator/
cp -f $(KF_TRAINING_ROOT)/manifests/base/crds/* $(EXTERNAL_CRDS_DIR)/training-operator/
## Removing kubeflow.org_mpijobs.yaml is required because the MPIJob version conflicts between training-operator and mpi-operator in integration tests.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why is it only a problem for integration and not E2E?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let me explain.
The integration test suite requires specifying the CRD directory, and it doesn't care about the kustomization.yaml there, only plain CRDs -

DepCRDPaths: []string{filepath.Join("..", "..", "..", "dep-crds", "jobset-operator"), filepath.Join("..", "..", "..", "dep-crds", "training-operator")},

Thus we can't have MPIJob CRD from training-operator there, we need to physically remove the file from the folder.

E2E doesn't care about the MPIJob file, because we have our own kustomize in test/e2e/config/multikueue, where we patch it so MPIJob CRD is deleted.

As you can see we have different needs depending on the test, that can't be satisfied with the current approach.
Previously I had combined the two by using sed to remove the MPIJob file entry from the original kustomization and also delete the file from the folder.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the details!

It makes me think that we need a note on this page https://kueue.sigs.k8s.io/docs/tasks/run/kubeflow/mpijobs/ explaining that you need to disable the MPI from training-operator if using both.

mkdir -p $(EXTERNAL_CRDS_DIR)/training-operator-crds/
find $(KF_TRAINING_ROOT)/manifests/base/crds/* -type f -not -name "kubeflow.org_mpijobs.yaml" -exec cp -pf {} $(EXTERNAL_CRDS_DIR)/training-operator-crds/ \;

.PHONY: kf-training-operator-manifests
kf-training-operator-manifests: ## Copy whole manifests folder from the training-operator to the dep-crds directory.
## Full version of the manifest is required for e2e multikueue tests.
# The source tree comes from the Go module cache (see KF_TRAINING_ROOT), whose
# files are typically read-only; a previously copied tree must therefore be made
# writable before removal, or `rm -rf` would fail on re-runs.
if [ -d "$(EXTERNAL_CRDS_DIR)/training-operator" ]; then \
chmod -R u+w "$(EXTERNAL_CRDS_DIR)/training-operator" && \
rm -rf "$(EXTERNAL_CRDS_DIR)/training-operator"; \
fi
# Recreate the target directory and copy the entire manifests tree (not just
# the CRDs, which the kf-training-operator-crd target handles separately).
mkdir -p "$(EXTERNAL_CRDS_DIR)/training-operator"
cp -rf "$(KF_TRAINING_ROOT)/manifests" "$(EXTERNAL_CRDS_DIR)/training-operator"

RAY_ROOT = $(shell $(GO_CMD) list -m -mod=readonly -f "{{.Dir}}" github.com/ray-project/kuberay/ray-operator)
.PHONY: ray-operator-crd
Expand All @@ -133,7 +144,7 @@ cluster-autoscaler-crd: ## Copy the CRDs from the cluster-autoscaler to the dep-
cp -f $(CLUSTER_AUTOSCALER_ROOT)/config/crd/* $(EXTERNAL_CRDS_DIR)/cluster-autoscaler/

.PHONY: dep-crds
dep-crds: mpi-operator-crd kf-training-operator-crd ray-operator-crd jobset-operator-crd cluster-autoscaler-crd ## Copy the CRDs from the external operators to the dep-crds directory.
dep-crds: mpi-operator-crd kf-training-operator-crd ray-operator-crd jobset-operator-crd cluster-autoscaler-crd kf-training-operator-manifests ## Copy the CRDs from the external operators to the dep-crds directory.
@echo "Copying CRDs from external operators to dep-crds directory"

.PHONY: kueuectl-docs
Expand Down
11 changes: 6 additions & 5 deletions Makefile-test.mk
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ IMAGE_TAG ?= $(IMAGE_REPO):$(GIT_TAG)
# JobSet Version
JOBSET_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" sigs.k8s.io/jobset)
KUBEFLOW_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" github.com/kubeflow/training-operator)
KUBEFLOW_MPI_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" github.com/kubeflow/mpi-operator)

##@ Tests

Expand All @@ -67,7 +68,7 @@ test: gotestsum ## Run tests.
$(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml -- $(GO_TEST_FLAGS) $(shell $(GO_CMD) list ./... | grep -v '/test/') -coverpkg=./... -coverprofile $(ARTIFACTS)/cover.out

.PHONY: test-integration
test-integration: gomod-download envtest ginkgo mpi-operator-crd ray-operator-crd jobset-operator-crd kf-training-operator-crd cluster-autoscaler-crd kueuectl ## Run tests.
test-integration: gomod-download envtest ginkgo dep-crds kueuectl ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" \
KUEUE_BIN=$(PROJECT_DIR)/bin \
ENVTEST_K8S_VERSION=$(ENVTEST_K8S_VERSION) \
Expand All @@ -76,10 +77,10 @@ test-integration: gomod-download envtest ginkgo mpi-operator-crd ray-operator-cr

CREATE_KIND_CLUSTER ?= true
.PHONY: test-e2e
test-e2e: kustomize ginkgo yq gomod-download jobset-operator-crd kf-training-operator-crd kueuectl run-test-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)
test-e2e: kustomize ginkgo yq gomod-download dep-crds kueuectl run-test-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)

.PHONY: test-multikueue-e2e
test-multikueue-e2e: kustomize ginkgo yq gomod-download jobset-operator-crd kf-training-operator-crd run-test-multikueue-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)
test-multikueue-e2e: kustomize ginkgo yq gomod-download dep-crds run-test-multikueue-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)


E2E_TARGETS := $(addprefix run-test-e2e-,${E2E_K8S_VERSIONS})
Expand All @@ -92,12 +93,12 @@ FORCE:
run-test-e2e-%: K8S_VERSION = $(@:run-test-e2e-%=%)
run-test-e2e-%: FORCE
@echo Running e2e for k8s ${K8S_VERSION}
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) ./hack/e2e-test.sh
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) KUBEFLOW_MPI_VERSION=$(KUBEFLOW_MPI_VERSION) ./hack/e2e-test.sh

run-test-multikueue-e2e-%: K8S_VERSION = $(@:run-test-multikueue-e2e-%=%)
run-test-multikueue-e2e-%: FORCE
@echo Running multikueue e2e for k8s ${K8S_VERSION}
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) ./hack/multikueue-e2e-test.sh
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) KUBEFLOW_MPI_VERSION=$(KUBEFLOW_MPI_VERSION) ./hack/multikueue-e2e-test.sh

SCALABILITY_RUNNER := $(PROJECT_DIR)/bin/performance-scheduler-runner
.PHONY: performance-scheduler-runner
Expand Down
1 change: 1 addition & 0 deletions charts/kueue/templates/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,7 @@ rules:
- mpijobs/status
verbs:
- get
- patch
- update
- apiGroups:
- kubeflow.org
Expand Down
1 change: 1 addition & 0 deletions config/components/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,7 @@ rules:
- mpijobs/status
verbs:
- get
- patch
- update
- apiGroups:
- kubeflow.org
Expand Down
24 changes: 18 additions & 6 deletions hack/e2e-common.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,15 @@ export JOBSET_MANIFEST="https://github.com/kubernetes-sigs/jobset/releases/downl
export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/

export KUBEFLOW_MANIFEST="https://github.com/kubeflow/training-operator/manifests/overlays/standalone?ref=${KUBEFLOW_VERSION}"
#no matching semver tag unfortunately
export KUBEFLOW_IMAGE=kubeflow/training-operator:v1-855e096
export KUBEFLOW_CRDS=${ROOT_DIR}/dep-crds/training-operator/
export KUBEFLOW_MANIFEST_MANAGER=${ROOT_DIR}/test/e2e/config/multikueue/manager
export KUBEFLOW_MANIFEST_WORKER=${ROOT_DIR}/test/e2e/config/multikueue/worker
KUBEFLOW_IMAGE_VERSION=$($KUSTOMIZE build "$KUBEFLOW_MANIFEST_WORKER" | $YQ e 'select(.kind == "Deployment") | .spec.template.spec.containers[0].image | split(":") | .[1]')
export KUBEFLOW_IMAGE_VERSION
export KUBEFLOW_IMAGE=kubeflow/training-operator:${KUBEFLOW_IMAGE_VERSION}

# MPI-operator install manifest, image, and MPIJob CRD used by the e2e tests.
export KUBEFLOW_MPI_MANIFEST="https://raw.githubusercontent.com/kubeflow/mpi-operator/${KUBEFLOW_MPI_VERSION}/deploy/v2beta1/mpi-operator.yaml"
# ${KUBEFLOW_MPI_VERSION/#v} strips a leading "v" (bash ${var/#pattern} prefix
# substitution): the image tag carries no "v" prefix, unlike the git tag.
export KUBEFLOW_MPI_IMAGE=mpioperator/mpi-operator:${KUBEFLOW_MPI_VERSION/#v}
export KUBEFLOW_MPI_CRD=${ROOT_DIR}/dep-crds/mpi-operator/kubeflow.org_mpijobs.yaml

# $1 - cluster name
function cluster_cleanup {
Expand Down Expand Up @@ -74,9 +79,16 @@ function install_jobset {

#$1 - cluster name
function install_kubeflow {
	local cluster_name="${1}"
	# Preload the training-operator image into the kind cluster, then deploy
	# the kustomized worker manifest against that cluster's context.
	cluster_kind_load_image "${cluster_name}" "${KUBEFLOW_IMAGE}"
	kubectl config use-context "kind-${cluster_name}"
	kubectl apply -k "${KUBEFLOW_MANIFEST_WORKER}"
}

#$1 - cluster name
function install_mpi {
	# Fix: the original passed "${KUBEFLOW_MPI_IMAGE/#v}", re-applying the
	# leading-"v" strip to the whole image reference. The tag is already
	# stripped where KUBEFLOW_MPI_IMAGE is defined, and the image name starts
	# with "mpioperator/", so the extra substitution was a confusing no-op.
	cluster_kind_load_image "${1}" "${KUBEFLOW_MPI_IMAGE}"
	kubectl config use-context "kind-${1}"
	# NOTE(review): server-side apply is presumably used because the MPI CRD
	# manifest is too large for client-side apply's annotation — confirm.
	kubectl apply --server-side -f "${KUBEFLOW_MPI_MANIFEST}"
}

INITIAL_IMAGE=$($YQ '.images[] | select(.name == "controller") | [.newName, .newTag] | join(":")' config/components/manager/kustomization.yaml)
Expand Down
17 changes: 15 additions & 2 deletions hack/multikueue-e2e-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ function cleanup {
fi
#do the image restore here for the case when an error happened during deploy
restore_managers_image
# Remove the `newTag` for the `kubeflow/training-operator` to revert to the default version
$YQ eval 'del(.images[] | select(.name == "kubeflow/training-operator").newTag)' -i "$KUBEFLOW_MANIFEST_MANAGER/kustomization.yaml"
}


Expand Down Expand Up @@ -80,16 +82,27 @@ function kind_load {
install_jobset "$WORKER2_KIND_CLUSTER_NAME"

# KUBEFLOW SETUP
# In order for MPI-operator and Training-operator to work on the same cluster it is required that:
# 1. 'kubeflow.org_mpijobs.yaml' is removed from base/crds/kustomization.yaml - https://github.com/kubeflow/training-operator/issues/1930
# 2. Training-operator deployment is modified to enable all kubeflow jobs except for mpi - https://github.com/kubeflow/training-operator/issues/1777

# Modify the `newTag` for the `kubeflow/training-operator` to pin the single training-operator version in use
$YQ eval '(.images[] | select(.name == "kubeflow/training-operator").newTag) = env(KUBEFLOW_IMAGE_VERSION)' -i "$KUBEFLOW_MANIFEST_MANAGER/kustomization.yaml"
Copy link
Contributor

@mbobrovskyi mbobrovskyi Sep 11, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are you reverting these changes after testing?

Like here:

(cd config/components/manager && $KUSTOMIZE edit set image controller="$IMAGE_TAG")

kueue/hack/e2e-common.sh

Lines 97 to 99 in 467b4ee

function restore_managers_image {
(cd config/components/manager && $KUSTOMIZE edit set image controller="$INITIAL_IMAGE")
}

If not, users may accidentally commit unnecessary changes to this file.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't, good point I will add the restore

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

isn't this file outside of the tracked files?

Copy link
Contributor

@mbobrovskyi mbobrovskyi Sep 12, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No. It's on the git and yq will edit this file.

# MANAGER
# Only install the CRDs and not the controller to be able to
# have Kubeflow Jobs admitted without execution in the manager cluster.
kubectl config use-context "kind-${MANAGER_KIND_CLUSTER_NAME}"
kubectl apply -k "${KUBEFLOW_CRDS}"
kubectl apply -k "${KUBEFLOW_MANIFEST_MANAGER}"
## MPI
kubectl apply --server-side -f "${KUBEFLOW_MPI_CRD}"

# WORKERS
docker pull kubeflow/training-operator:v1-855e096
docker pull "${KUBEFLOW_IMAGE}"
docker pull "${KUBEFLOW_MPI_IMAGE}"
install_kubeflow "$WORKER1_KIND_CLUSTER_NAME"
install_kubeflow "$WORKER2_KIND_CLUSTER_NAME"
install_mpi "$WORKER1_KIND_CLUSTER_NAME"
install_mpi "$WORKER2_KIND_CLUSTER_NAME"

fi
}
Expand Down
10 changes: 5 additions & 5 deletions pkg/controller/jobframework/reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
kfmpi "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -59,7 +59,7 @@ func TestIsParentJobManaged(t *testing.T) {
},
"child job has ownerReference with known non-existing workload owner": {
job: testingjob.MakeJob(childJobName, jobNamespace).
OwnerReference(parentJobName, kubeflow.SchemeGroupVersionKind).
OwnerReference(parentJobName, kfmpi.SchemeGroupVersionKind).
Obj(),
wantErr: ErrWorkloadOwnerNotFound,
},
Expand All @@ -69,7 +69,7 @@ func TestIsParentJobManaged(t *testing.T) {
Queue("test-q").
Obj(),
job: testingjob.MakeJob(childJobName, jobNamespace).
OwnerReference(parentJobName, kubeflow.SchemeGroupVersionKind).
OwnerReference(parentJobName, kfmpi.SchemeGroupVersionKind).
Obj(),
wantManaged: true,
},
Expand All @@ -78,14 +78,14 @@ func TestIsParentJobManaged(t *testing.T) {
UID(parentJobName).
Obj(),
job: testingjob.MakeJob(childJobName, jobNamespace).
OwnerReference(parentJobName, kubeflow.SchemeGroupVersionKind).
OwnerReference(parentJobName, kfmpi.SchemeGroupVersionKind).
Obj(),
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
t.Cleanup(EnableIntegrationsForTest(t, "kubeflow.org/mpijob"))
builder := utiltesting.NewClientBuilder(kubeflow.AddToScheme)
builder := utiltesting.NewClientBuilder(kfmpi.AddToScheme)
if tc.parentJob != nil {
builder = builder.WithObjects(tc.parentJob)
}
Expand Down
16 changes: 8 additions & 8 deletions pkg/controller/jobframework/setup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import (

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
kfmpi "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1"
rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
batchv1 "k8s.io/api/batch/v1"
Expand Down Expand Up @@ -58,7 +58,7 @@ func TestSetupControllers(t *testing.T) {
"kubeflow.org/mpijob": {
NewReconciler: testNewReconciler,
SetupWebhook: testSetupWebhook,
JobType: &kubeflow.MPIJob{},
JobType: &kfmpi.MPIJob{},
SetupIndexes: testSetupIndexes,
AddToScheme: testAddToScheme,
CanSupportIntegration: testCanSupportIntegration,
Expand Down Expand Up @@ -98,7 +98,7 @@ func TestSetupControllers(t *testing.T) {
},
mapperGVKs: []schema.GroupVersionKind{
batchv1.SchemeGroupVersion.WithKind("Job"),
kubeflow.SchemeGroupVersionKind,
kfmpi.SchemeGroupVersionKind,
},
wantEnabledIntegrations: []string{"batch/job", "kubeflow.org/mpijob"},
},
Expand All @@ -117,7 +117,7 @@ func TestSetupControllers(t *testing.T) {
},
mapperGVKs: []schema.GroupVersionKind{
batchv1.SchemeGroupVersion.WithKind("Job"),
kubeflow.SchemeGroupVersionKind,
kfmpi.SchemeGroupVersionKind,
// Not including RayCluster
},
delayedGVKs: []*schema.GroupVersionKind{
Expand All @@ -137,7 +137,7 @@ func TestSetupControllers(t *testing.T) {
}

ctx, logger := utiltesting.ContextWithLog(t)
k8sClient := utiltesting.NewClientBuilder(jobset.AddToScheme, kubeflow.AddToScheme, kftraining.AddToScheme, rayv1.AddToScheme).Build()
k8sClient := utiltesting.NewClientBuilder(jobset.AddToScheme, kfmpi.AddToScheme, kftraining.AddToScheme, rayv1.AddToScheme).Build()

mgrOpts := ctrlmgr.Options{
Scheme: k8sClient.Scheme(),
Expand Down Expand Up @@ -245,16 +245,16 @@ func TestSetupIndexes(t *testing.T) {
"kubeflow.org/mpijob is disabled in the configAPI": {
workloads: []kueue.Workload{
*utiltesting.MakeWorkload("alpha-wl", testNamespace).
ControllerReference(kubeflow.SchemeGroupVersionKind, "alpha", "mpijob").
ControllerReference(kfmpi.SchemeGroupVersionKind, "alpha", "mpijob").
Obj(),
*utiltesting.MakeWorkload("beta-wl", testNamespace).
ControllerReference(kubeflow.SchemeGroupVersionKind, "beta", "mpijob").
ControllerReference(kfmpi.SchemeGroupVersionKind, "beta", "mpijob").
Obj(),
},
opts: []Option{
WithEnabledFrameworks([]string{"batch/job"}),
},
filter: client.MatchingFields{GetOwnerKey(kubeflow.SchemeGroupVersionKind): "alpha"},
filter: client.MatchingFields{GetOwnerKey(kfmpi.SchemeGroupVersionKind): "alpha"},
wantFieldMatcherError: true,
},
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/controller/jobframework/validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"fmt"
"strings"

kfmpi "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1"
batchv1 "k8s.io/api/batch/v1"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
Expand All @@ -42,7 +43,8 @@ var (
kftraining.SchemeGroupVersion.WithKind(kftraining.TFJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.PaddleJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.PyTorchJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.XGBoostJobKind).String())
kftraining.SchemeGroupVersion.WithKind(kftraining.XGBoostJobKind).String(),
kfmpi.SchemeGroupVersion.WithKind(kfmpi.Kind).String())
)

// ValidateJobOnCreate encapsulates all GenericJob validations that must be performed on a Create operation
Expand Down
Loading