[Feature] Support Kubeflow MPIJob in MultiKueue #2880

Merged
Changes from 12 commits
5 changes: 4 additions & 1 deletion Makefile-deps.mk
@@ -112,7 +112,10 @@ KF_TRAINING_ROOT = $(shell $(GO_CMD) list -m -mod=readonly -f "{{.Dir}}" github.com/kubeflow/training-operator)
.PHONY: kf-training-operator-crd
kf-training-operator-crd: ## Copy the CRDs from the training-operator to the dep-crds directory.
mkdir -p $(EXTERNAL_CRDS_DIR)/training-operator/
cp -f $(KF_TRAINING_ROOT)/manifests/base/crds/* $(EXTERNAL_CRDS_DIR)/training-operator/
# Remove the v1 `kubeflow.org_mpijobs.yaml` CRD from kustomization.yaml; Kueue uses the v2beta1 CRD from mpi-operator
cp -prf $(KF_TRAINING_ROOT)/manifests/* $(EXTERNAL_CRDS_DIR)/training-operator/
chmod -R u+w "${EXTERNAL_CRDS_DIR}/training-operator/"
sed -i '/kubeflow.org_mpijobs.yaml/d' $(EXTERNAL_CRDS_DIR)/training-operator/base/crds/kustomization.yaml

RAY_ROOT = $(shell $(GO_CMD) list -m -mod=readonly -f "{{.Dir}}" github.com/ray-project/kuberay/ray-operator)
.PHONY: ray-operator-crd
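For context, a quick way to confirm the effect of the `sed` line above after running `make kf-training-operator-crd` (a sketch; the path assumes EXTERNAL_CRDS_DIR's default of dep-crds):

# The v1 MPIJob CRD should no longer be referenced in the copied kustomization;
# the v2beta1 CRD is supplied by mpi-operator instead.
grep kubeflow.org_mpijobs.yaml dep-crds/training-operator/base/crds/kustomization.yaml \
  || echo "v1 MPIJob CRD entry removed"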
9 changes: 5 additions & 4 deletions Makefile-test.mk
@@ -59,6 +59,7 @@ IMAGE_TAG ?= $(IMAGE_REPO):$(GIT_TAG)
# JobSet Version
JOBSET_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" sigs.k8s.io/jobset)
KUBEFLOW_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" github.com/kubeflow/training-operator)
KUBEFLOW_MPI_VERSION = $(shell $(GO_CMD) list -m -f "{{.Version}}" github.com/kubeflow/mpi-operator)

##@ Tests

@@ -76,10 +77,10 @@ test-integration: gomod-download envtest ginkgo mpi-operator-crd ray-operator-cr

CREATE_KIND_CLUSTER ?= true
.PHONY: test-e2e
test-e2e: kustomize ginkgo yq gomod-download jobset-operator-crd kf-training-operator-crd kueuectl run-test-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)
test-e2e: kustomize ginkgo yq gomod-download dep-crds kueuectl run-test-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)

.PHONY: test-multikueue-e2e
test-multikueue-e2e: kustomize ginkgo yq gomod-download jobset-operator-crd kf-training-operator-crd run-test-multikueue-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)
test-multikueue-e2e: kustomize ginkgo yq gomod-download dep-crds run-test-multikueue-e2e-$(E2E_KIND_VERSION:kindest/node:v%=%)


E2E_TARGETS := $(addprefix run-test-e2e-,${E2E_K8S_VERSIONS})
@@ -92,12 +93,12 @@ FORCE:
run-test-e2e-%: K8S_VERSION = $(@:run-test-e2e-%=%)
run-test-e2e-%: FORCE
@echo Running e2e for k8s ${K8S_VERSION}
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) ./hack/e2e-test.sh
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) KUBEFLOW_MPI_VERSION=$(KUBEFLOW_MPI_VERSION) ./hack/e2e-test.sh

run-test-multikueue-e2e-%: K8S_VERSION = $(@:run-test-multikueue-e2e-%=%)
run-test-multikueue-e2e-%: FORCE
@echo Running multikueue e2e for k8s ${K8S_VERSION}
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) ./hack/multikueue-e2e-test.sh
E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) KUBEFLOW_MPI_VERSION=$(KUBEFLOW_MPI_VERSION) ./hack/multikueue-e2e-test.sh

SCALABILITY_RUNNER := $(PROJECT_DIR)/bin/performance-scheduler-runner
.PHONY: performance-scheduler-runner
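The new KUBEFLOW_MPI_VERSION variable follows the same `go list` pattern as JOBSET_VERSION and KUBEFLOW_VERSION above; to see what it resolves to from a checkout:

# Prints the mpi-operator module version pinned in go.mod (the exact value
# depends on the checkout).
go list -m -f "{{.Version}}" github.com/kubeflow/mpi-operator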
1 change: 1 addition & 0 deletions charts/kueue/templates/rbac/role.yaml
@@ -205,6 +205,7 @@ rules:
- mpijobs/status
verbs:
- get
- patch
- update
- apiGroups:
- kubeflow.org
1 change: 1 addition & 0 deletions config/components/rbac/role.yaml
@@ -204,6 +204,7 @@ rules:
- mpijobs/status
verbs:
- get
- patch
- update
- apiGroups:
- kubeflow.org
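The added `patch` verb on mpijobs/status is what the MultiKueue adapter's status-sync path (clientutil.PatchStatus, shown later in this diff) relies on. A sketch of how to verify the deployed role, assuming the default kueue-system service account name:

# The service account name is an assumption; adjust for your installation.
kubectl auth can-i patch mpijobs.kubeflow.org --subresource=status \
  --as=system:serviceaccount:kueue-system:kueue-controller-manager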
44 changes: 41 additions & 3 deletions hack/e2e-common.sh
@@ -23,10 +23,15 @@ export JOBSET_MANIFEST="https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml"
export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/

export KUBEFLOW_MANIFEST="https://github.com/kubeflow/training-operator/manifests/overlays/standalone?ref=${KUBEFLOW_VERSION}"
# no matching semver tag, unfortunately
export KUBEFLOW_IMAGE=kubeflow/training-operator:v1-855e096
export KUBEFLOW_CRDS=${ROOT_DIR}/dep-crds/training-operator/
export KUBEFLOW_CRDS=${ROOT_DIR}/dep-crds/training-operator
export KUBEFLOW_CRDS_BASE=${KUBEFLOW_CRDS}/base/crds
export KUBEFLOW_MANIFEST=${KUBEFLOW_CRDS}/overlays/standalone

export KUBEFLOW_MPI_MANIFEST="https://raw.githubusercontent.com/kubeflow/mpi-operator/${KUBEFLOW_MPI_VERSION}/deploy/v2beta1/mpi-operator.yaml"
export KUBEFLOW_MPI_IMAGE=mpioperator/mpi-operator:${KUBEFLOW_MPI_VERSION/#v}
export KUBEFLOW_MPI_CRD=${ROOT_DIR}/dep-crds/mpi-operator/kubeflow.org_mpijobs.yaml

# $1 - cluster name
function cluster_cleanup {
@@ -72,13 +77,46 @@ function install_jobset {
kubectl apply --server-side -f "${JOBSET_MANIFEST}"
}

function patch_kubeflow_manifest {
# For the MPI-operator and the training-operator to work on the same cluster, two things are required:
# 1. 'kubeflow.org_mpijobs.yaml' is removed from base/crds/kustomization.yaml - https://github.com/kubeflow/training-operator/issues/1930
#    (already done in the Makefile-deps.mk target `kf-training-operator-crd`)
# 2. The training-operator deployment is patched so that it handles all Kubeflow jobs except MPIJob - https://github.com/kubeflow/training-operator/issues/1777
KUBEFLOW_DEPLOYMENT="${KUBEFLOW_CRDS}/base/deployment.yaml"

# Find the line after which to insert the args
INSERT_LINE=$(grep -n "^ *- /manager" "${KUBEFLOW_DEPLOYMENT}" | head -n 1 | cut -d ':' -f 1)

# Prepare patch with the args after the specified line
# The closing EOF must be flush left; the heredoc does not terminate otherwise
ARGS_PATCH=$(cat <<EOF
args:
- --enable-scheme=tfjob
- --enable-scheme=pytorchjob
- --enable-scheme=xgboostjob
- --enable-scheme=paddlejob
EOF
)
# Apply the patch
sed -i -e "${INSERT_LINE}r /dev/stdin" "${KUBEFLOW_DEPLOYMENT}" << EOF
${ARGS_PATCH}
EOF
}

#$1 - cluster name
function install_kubeflow {
cluster_kind_load_image "${1}" ${KUBEFLOW_IMAGE}
cluster_kind_load_image "${1}" "${KUBEFLOW_IMAGE}"
kubectl config use-context "kind-${1}"
kubectl apply -k "${KUBEFLOW_MANIFEST}"
}

#$1 - cluster name
function install_mpi {
cluster_kind_load_image "${1}" "${KUBEFLOW_MPI_IMAGE/#v}"
kubectl config use-context "kind-${1}"
kubectl apply --server-side -f "${KUBEFLOW_MPI_MANIFEST}"
}

INITIAL_IMAGE=$($YQ '.images[] | select(.name == "controller") | [.newName, .newTag] | join(":")' config/components/manager/kustomization.yaml)
export INITIAL_IMAGE

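As a sanity check, after `patch_kubeflow_manifest` runs, the manager container in the copied deployment should look roughly like this (a sketch, not a verbatim excerpt):

# Expected shape of dep-crds/training-operator/base/deployment.yaml after patching:
#       command:
#         - /manager
#       args:
#         - --enable-scheme=tfjob
#         - --enable-scheme=pytorchjob
#         - --enable-scheme=xgboostjob
#         - --enable-scheme=paddlejob
grep -A 5 -- '- /manager' dep-crds/training-operator/base/deployment.yaml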
8 changes: 7 additions & 1 deletion hack/multikueue-e2e-test.sh
@@ -84,12 +84,18 @@ function kind_load {
# Only install the CRDs and not the controller to be able to
# have Kubeflow Jobs admitted without execution in the manager cluster.
kubectl config use-context "kind-${MANAGER_KIND_CLUSTER_NAME}"
kubectl apply -k "${KUBEFLOW_CRDS}"
kubectl apply -k "${KUBEFLOW_CRDS_BASE}"
## MPI
kubectl apply --server-side -f "${KUBEFLOW_MPI_CRD}"

# WORKERS
docker pull kubeflow/training-operator:v1-855e096
docker pull "mpioperator/mpi-operator:${KUBEFLOW_MPI_VERSION/#v}"
patch_kubeflow_manifest
install_kubeflow "$WORKER1_KIND_CLUSTER_NAME"
install_kubeflow "$WORKER2_KIND_CLUSTER_NAME"
install_mpi "$WORKER1_KIND_CLUSTER_NAME"
install_mpi "$WORKER2_KIND_CLUSTER_NAME"

fi
}
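A quick way to confirm the intended asymmetry after `kind_load` (a sketch; the operator namespace names are assumptions based on the standalone manifests):

# Manager: MPIJob CRD present, but no operator controllers running.
kubectl --context "kind-${MANAGER_KIND_CLUSTER_NAME}" get crd mpijobs.kubeflow.org
# Workers: both operators should be up.
kubectl --context "kind-${WORKER1_KIND_CLUSTER_NAME}" get deployment -n mpi-operator
kubectl --context "kind-${WORKER1_KIND_CLUSTER_NAME}" get deployment -n kubeflow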
4 changes: 3 additions & 1 deletion pkg/controller/jobframework/validation.go
@@ -20,6 +20,7 @@ import (
"fmt"
"strings"

kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1"
batchv1 "k8s.io/api/batch/v1"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
@@ -42,7 +43,8 @@ var (
kftraining.SchemeGroupVersion.WithKind(kftraining.TFJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.PaddleJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.PyTorchJobKind).String(),
kftraining.SchemeGroupVersion.WithKind(kftraining.XGBoostJobKind).String())
kftraining.SchemeGroupVersion.WithKind(kftraining.XGBoostJobKind).String(),
kubeflow.SchemeGroupVersion.WithKind(kubeflow.Kind).String())
)

// ValidateJobOnCreate encapsulates all GenericJob validations that must be performed on a Create operation
5 changes: 3 additions & 2 deletions pkg/controller/jobs/mpijob/mpijob_controller.go
@@ -51,13 +51,14 @@ func init() {
JobType: &kubeflow.MPIJob{},
AddToScheme: kubeflow.AddToScheme,
IsManagingObjectsOwner: isMPIJob,
MultiKueueAdapter: &multikueueAdapter{},
}))
}

// +kubebuilder:rbac:groups=scheduling.k8s.io,resources=priorityclasses,verbs=list;get;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=mpijobs,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=mpijobs/status,verbs=get;update
// +kubebuilder:rbac:groups=kubeflow.org,resources=mpijobs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=mpijobs/finalizers,verbs=get;update
// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads/status,verbs=get;update;patch
@@ -72,7 +73,7 @@ func NewJob() jobframework.GenericJob {
var NewReconciler = jobframework.NewGenericReconcilerFactory(NewJob)

func isMPIJob(owner *metav1.OwnerReference) bool {
return owner.Kind == "MPIJob" && strings.HasPrefix(owner.APIVersion, "kubeflow.org/v2")
return owner.Kind == "MPIJob" && strings.HasPrefix(owner.APIVersion, kubeflow.SchemeGroupVersion.Group)
}

type MPIJob kubeflow.MPIJob
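The RBAC changes to charts/kueue/templates/rbac/role.yaml and config/components/rbac/role.yaml earlier in this diff are generated from the `+kubebuilder:rbac` markers above rather than hand-edited; after changing a marker they are regenerated (target name assumed from the repo's standard controller-gen setup):

# Re-runs controller-gen over the kubebuilder markers and rewrites the
# generated role manifests.
make manifests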
16 changes: 8 additions & 8 deletions pkg/controller/jobs/mpijob/mpijob_controller_test.go
@@ -211,8 +211,8 @@ func TestReconciler(t *testing.T) {
reconcilerOptions: []jobframework.Option{
jobframework.WithManageJobsWithoutQueueName(true),
},
job: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).Obj(),
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).Obj(),
job: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).Obj(),
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).Obj(),
wantWorkloads: []kueue.Workload{
*utiltesting.MakeWorkload("mpijob", "ns").
PodSets(
@@ -226,11 +226,11 @@ func TestReconciler(t *testing.T) {
reconcilerOptions: []jobframework.Option{
jobframework.WithManageJobsWithoutQueueName(true),
},
job: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).WorkloadPriorityClass("test-wpc").Obj(),
job: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).WorkloadPriorityClass("test-wpc").Obj(),
priorityClasses: []client.Object{
baseWPCWrapper.Obj(),
},
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).WorkloadPriorityClass("test-wpc").Obj(),
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).WorkloadPriorityClass("test-wpc").Obj(),
wantWorkloads: []kueue.Workload{
*utiltesting.MakeWorkload("mpijob", "ns").
PodSets(
@@ -245,11 +245,11 @@ func TestReconciler(t *testing.T) {
reconcilerOptions: []jobframework.Option{
jobframework.WithManageJobsWithoutQueueName(true),
},
job: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).PriorityClass("test-pc").Obj(),
job: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).PriorityClass("test-pc").Obj(),
priorityClasses: []client.Object{
basePCWrapper.Obj(),
},
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).PriorityClass("test-pc").Obj(),
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).PriorityClass("test-pc").Obj(),
wantWorkloads: []kueue.Workload{
*utiltesting.MakeWorkload("mpijob", "ns").
PodSets(
@@ -264,12 +264,12 @@ func TestReconciler(t *testing.T) {
reconcilerOptions: []jobframework.Option{
jobframework.WithManageJobsWithoutQueueName(true),
},
job: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).
job: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).
WorkloadPriorityClass("test-wpc").PriorityClass("test-pc").Obj(),
priorityClasses: []client.Object{
basePCWrapper.Obj(), baseWPCWrapper.Obj(),
},
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").Parallelism(2).
wantJob: testingmpijob.MakeMPIJob("mpijob", "ns").MPIJobReplicaSpecsDefault().Parallelism(2).
WorkloadPriorityClass("test-wpc").PriorityClass("test-pc").Obj(),
wantWorkloads: []kueue.Workload{
*utiltesting.MakeWorkload("mpijob", "ns").
117 changes: 117 additions & 0 deletions pkg/controller/jobs/mpijob/mpijob_multikueue_adapter.go
@@ -0,0 +1,117 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mpijob

import (
"context"
"errors"
"fmt"

kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"

kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
"sigs.k8s.io/kueue/pkg/controller/constants"
"sigs.k8s.io/kueue/pkg/controller/jobframework"
"sigs.k8s.io/kueue/pkg/util/api"
clientutil "sigs.k8s.io/kueue/pkg/util/client"
)

type multikueueAdapter struct{}

var _ jobframework.MultiKueueAdapter = (*multikueueAdapter)(nil)

func (b *multikueueAdapter) SyncJob(ctx context.Context, localClient client.Client, remoteClient client.Client, key types.NamespacedName, workloadName, origin string) error {
localJob := kubeflow.MPIJob{}
err := localClient.Get(ctx, key, &localJob)
if err != nil {
return err
}

remoteJob := kubeflow.MPIJob{}
err = remoteClient.Get(ctx, key, &remoteJob)
if client.IgnoreNotFound(err) != nil {
return err
}

// if the remote exists, just copy the status
if err == nil {
return clientutil.PatchStatus(ctx, localClient, &localJob, func() (bool, error) {
localJob.Status = remoteJob.Status
return true, nil
})
}

remoteJob = kubeflow.MPIJob{
ObjectMeta: api.CloneObjectMetaForCreation(&localJob.ObjectMeta),
Spec: *localJob.Spec.DeepCopy(),
}

// add the prebuilt workload
if remoteJob.Labels == nil {
remoteJob.Labels = map[string]string{}
}
remoteJob.Labels[constants.PrebuiltWorkloadLabel] = workloadName
remoteJob.Labels[kueuealpha.MultiKueueOriginLabel] = origin

return remoteClient.Create(ctx, &remoteJob)
}

func (b *multikueueAdapter) DeleteRemoteObject(ctx context.Context, remoteClient client.Client, key types.NamespacedName) error {
job := kubeflow.MPIJob{}
err := remoteClient.Get(ctx, key, &job)
if err != nil {
return client.IgnoreNotFound(err)
}
return client.IgnoreNotFound(remoteClient.Delete(ctx, &job))
}

func (b *multikueueAdapter) KeepAdmissionCheckPending() bool {
return false
}

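// MPIJob exposes no managedBy-style field at this API version, so all
// MPIJobs are treated as managed by Kueue.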
func (b *multikueueAdapter) IsJobManagedByKueue(context.Context, client.Client, types.NamespacedName) (bool, string, error) {
return true, "", nil
}

func (b *multikueueAdapter) GVK() schema.GroupVersionKind {
return gvk
}

var _ jobframework.MultiKueueWatcher = (*multikueueAdapter)(nil)

func (*multikueueAdapter) GetEmptyList() client.ObjectList {
return &kubeflow.MPIJobList{}
}

func (*multikueueAdapter) WorkloadKeyFor(o runtime.Object) (types.NamespacedName, error) {
job, isJob := o.(*kubeflow.MPIJob)
if !isJob {
return types.NamespacedName{}, errors.New("not an MPIJob")
}

prebuiltWl, hasPrebuiltWorkload := job.Labels[constants.PrebuiltWorkloadLabel]
if !hasPrebuiltWorkload {
return types.NamespacedName{}, fmt.Errorf("no prebuilt workload found for mpijob: %s", klog.KObj(job))
}

return types.NamespacedName{Name: prebuiltWl, Namespace: job.Namespace}, nil
}
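To make the adapter's flow concrete, a hypothetical spot check after a MultiKueue-admitted MPIJob has been dispatched (context and object names are illustrative; the label keys are the Kueue constants used in SyncJob above):

# The remote copy created by SyncJob carries the prebuilt-workload and
# origin labels; expect entries along the lines of
#   kueue.x-k8s.io/prebuilt-workload-name and kueue.x-k8s.io/multikueue-origin.
kubectl --context kind-worker1 get mpijob sample-mpijob -n default -o jsonpath='{.metadata.labels}'
# The manager's local copy keeps its status mirrored from the remote.
kubectl --context kind-manager get mpijob sample-mpijob -n default -o jsonpath='{.status.conditions}'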