From 2f4d6e98bfcb8953d4e403b3fba0d43d2c0a4f09 Mon Sep 17 00:00:00 2001
From: Yikun Jiang
Date: Mon, 15 Nov 2021 16:11:56 +0800
Subject: [PATCH 1/3] Add the ability to create resources before the driver pod

---
 .../spark/deploy/k8s/KubernetesDriverSpec.scala |  1 +
 .../features/KubernetesFeatureConfigStep.scala  | 10 +++++++++-
 .../submit/KubernetesClientApplication.scala    | 17 ++++++++++++++++-
 .../k8s/submit/KubernetesDriverBuilder.scala    |  3 +++
 .../DriverCommandFeatureStepSuite.scala         |  2 +-
 .../spark/deploy/k8s/submit/ClientSuite.scala   |  1 +
 6 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesDriverSpec.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesDriverSpec.scala
index fce8c6a4bf49..a603cb08ba9a 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesDriverSpec.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesDriverSpec.scala
@@ -20,5 +20,6 @@ import io.fabric8.kubernetes.api.model.HasMetadata
 
 private[spark] case class KubernetesDriverSpec(
     pod: SparkPod,
+    driverPreKubernetesResources: Seq[HasMetadata],
     driverKubernetesResources: Seq[HasMetadata],
     systemProperties: Map[String, String])
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesFeatureConfigStep.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesFeatureConfigStep.scala
index 3fec92644b95..fcb789f4ae7f 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesFeatureConfigStep.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesFeatureConfigStep.scala
@@ -70,7 +70,15 @@ trait KubernetesFeatureConfigStep {
 
   /**
    * Return any additional Kubernetes resources that should be added to support this feature. Only
-   * applicable when creating the driver in cluster mode.
+   * applicable when creating the driver in cluster mode. These resources will be set up/refreshed
+   * before the driver pod is created.
+   */
+  def getAdditionalPreKubernetesResources(): Seq[HasMetadata] = Seq.empty
+
+  /**
+   * Return any additional Kubernetes resources that should be added to support this feature. Only
+   * applicable when creating the driver in cluster mode. These resources will be set up/refreshed
+   * after the driver pod is created.
    */
   def getAdditionalKubernetesResources(): Seq[HasMetadata] = Seq.empty
 }
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
index 3c3c4258ad9c..9ee08fedef14 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
@@ -133,22 +133,37 @@ private[spark] class Client(
       .build()
     val driverPodName = resolvedDriverPod.getMetadata.getName
 
+    // setup resources before pod creation
+    val preKubernetesResources = resolvedDriverSpec.driverPreKubernetesResources
+    try {
+      kubernetesClient.resourceList(preKubernetesResources: _*).createOrReplace()
+    } catch {
+      case NonFatal(e) =>
+        kubernetesClient.resourceList(preKubernetesResources: _*).delete()
+        throw e
+    }
+
     var watch: Watch = null
     var createdDriverPod: Pod = null
     try {
       createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
     } catch {
       case NonFatal(e) =>
+        kubernetesClient.resourceList(preKubernetesResources: _*).delete()
         logError("Please check \"kubectl auth can-i create pod\" first. It should be yes.")
         throw e
     }
+
+    // setup resources after pod creation, and refresh all resources owner references
     try {
-      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++ Seq(configMap)
+      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++
+        preKubernetesResources ++ Seq(configMap)
       addOwnerReference(createdDriverPod, otherKubernetesResources)
       kubernetesClient.resourceList(otherKubernetesResources: _*).createOrReplace()
     } catch {
       case NonFatal(e) =>
         kubernetesClient.pods().delete(createdDriverPod)
+        kubernetesClient.resourceList(preKubernetesResources: _*).delete()
         throw e
     }
 
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilder.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilder.scala
index b8b93bb22b13..f0c78f371d6d 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilder.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilder.scala
@@ -57,15 +57,18 @@ private[spark] class KubernetesDriverBuilder {
 
     val spec = KubernetesDriverSpec(
       initialPod,
+      driverPreKubernetesResources = Seq.empty,
       driverKubernetesResources = Seq.empty,
       conf.sparkConf.getAll.toMap)
 
     features.foldLeft(spec) { case (spec, feature) =>
       val configuredPod = feature.configurePod(spec.pod)
       val addedSystemProperties = feature.getAdditionalPodSystemProperties()
+      val addedPreResources = feature.getAdditionalPreKubernetesResources()
       val addedResources = feature.getAdditionalKubernetesResources()
       KubernetesDriverSpec(
         configuredPod,
+        spec.driverPreKubernetesResources ++ addedPreResources,
         spec.driverKubernetesResources ++ addedResources,
         spec.systemProperties ++ addedSystemProperties)
     }
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverCommandFeatureStepSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverCommandFeatureStepSuite.scala
index ebbb42f225c5..c92bf803ec55 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverCommandFeatureStepSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverCommandFeatureStepSuite.scala
@@ -175,7 +175,7 @@ class DriverCommandFeatureStepSuite extends SparkFunSuite {
     }
     val pod = step.configurePod(SparkPod.initialPod())
     val props = step.getAdditionalPodSystemProperties()
-    KubernetesDriverSpec(pod, Nil, props)
+    KubernetesDriverSpec(pod, Nil, Nil, props)
   }
 }
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
index a5b85938af3b..59a8c90c10c3 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
@@ -64,6 +64,7 @@ class ClientSuite extends SparkFunSuite with BeforeAndAfter {
 
   private val BUILT_KUBERNETES_SPEC = KubernetesDriverSpec(
     SparkPod(BUILT_DRIVER_POD, BUILT_DRIVER_CONTAINER),
+    Nil,
     ADDITIONAL_RESOURCES,
     RESOLVED_JAVA_OPTIONS)
 

From ebbfea6927cc191b926c6606f90df1f5e2548115 Mon Sep 17 00:00:00 2001
From: Yikun Jiang
Date: Thu, 16 Dec 2021 16:39:04 +0800
Subject: [PATCH 2/3] Address comments and add test cases

---
 .../submit/KubernetesClientApplication.scala  | 17 +++--
 .../spark/deploy/k8s/PodBuilderSuite.scala    |  4 +-
 .../spark/deploy/k8s/submit/ClientSuite.scala | 71 ++++++++++++++++++-
 .../submit/KubernetesDriverBuilderSuite.scala | 45 ++++++++++++
 4 files changed, 130 insertions(+), 7 deletions(-)

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
index 9ee08fedef14..eb32c2c23500 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
@@ -154,16 +154,25 @@ private[spark] class Client(
         throw e
     }
 
-    // setup resources after pod creation, and refresh all resources owner references
+    // Refresh all pre-resources' owner references
     try {
-      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++
-        preKubernetesResources ++ Seq(configMap)
+      addOwnerReference(createdDriverPod, preKubernetesResources)
+      kubernetesClient.resourceList(preKubernetesResources: _*).createOrReplace()
+    } catch {
+      case NonFatal(e) =>
+        kubernetesClient.pods().delete(createdDriverPod)
+        kubernetesClient.resourceList(preKubernetesResources: _*).delete()
+        throw e
+    }
+
+    // setup resources after pod creation, and refresh all resources' owner references
+    try {
+      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++ Seq(configMap)
       addOwnerReference(createdDriverPod, otherKubernetesResources)
       kubernetesClient.resourceList(otherKubernetesResources: _*).createOrReplace()
     } catch {
       case NonFatal(e) =>
         kubernetesClient.pods().delete(createdDriverPod)
-        kubernetesClient.resourceList(preKubernetesResources: _*).delete()
         throw e
     }
 
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/PodBuilderSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/PodBuilderSuite.scala
index 2051c932e539..a8a3ca4eea96 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/PodBuilderSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/PodBuilderSuite.scala
@@ -37,7 +37,7 @@ abstract class PodBuilderSuite extends SparkFunSuite {
 
   protected def buildPod(sparkConf: SparkConf, client: KubernetesClient): SparkPod
 
-  private val baseConf = new SparkConf(false)
+  protected val baseConf = new SparkConf(false)
     .set(Config.CONTAINER_IMAGE, "spark-executor:latest")
 
   test("use empty initial pod if template is not specified") {
@@ -80,7 +80,7 @@ abstract class PodBuilderSuite extends SparkFunSuite {
     assert(exception.getMessage.contains("Could not load pod from template file."))
   }
 
-  private def mockKubernetesClient(pod: Pod = podWithSupportedFeatures()): KubernetesClient = {
+  protected def mockKubernetesClient(pod: Pod = podWithSupportedFeatures()): KubernetesClient = {
     val kubernetesClient = mock(classOf[KubernetesClient])
     val pods = mock(classOf[MixedOperation[Pod, PodList, PodResource[Pod]]])
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
index 59a8c90c10c3..4fe1df4314fb 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
@@ -23,6 +23,7 @@ import java.nio.file.Files
 import scala.collection.JavaConverters._
 
 import io.fabric8.kubernetes.api.model._
+import io.fabric8.kubernetes.api.model.apiextensions.v1.{CustomResourceDefinition, CustomResourceDefinitionBuilder}
 import io.fabric8.kubernetes.client.{KubernetesClient, Watch}
 import io.fabric8.kubernetes.client.dsl.PodResource
 import org.mockito.{ArgumentCaptor, Mock, MockitoAnnotations}
@@ -31,7 +32,7 @@ import org.scalatest.BeforeAndAfter
 import org.scalatestplus.mockito.MockitoSugar._
 
 import org.apache.spark.{SparkConf, SparkFunSuite}
-import org.apache.spark.deploy.k8s._
+import org.apache.spark.deploy.k8s.{Config, _}
 import org.apache.spark.deploy.k8s.Constants._
 import org.apache.spark.deploy.k8s.Fabric8Aliases._
 import org.apache.spark.util.Utils
@@ -62,11 +63,19 @@ class ClientSuite extends SparkFunSuite with BeforeAndAfter {
   private val ADDITIONAL_RESOURCES = Seq(
     new SecretBuilder().withNewMetadata().withName("secret").endMetadata().build())
 
+  private val PRE_RESOURCES = Seq(
+    new CustomResourceDefinitionBuilder().withNewMetadata().withName("preCRD").endMetadata().build()
+  )
+
   private val BUILT_KUBERNETES_SPEC = KubernetesDriverSpec(
     SparkPod(BUILT_DRIVER_POD, BUILT_DRIVER_CONTAINER),
     Nil,
     ADDITIONAL_RESOURCES,
     RESOLVED_JAVA_OPTIONS)
+  private val BUILT_KUBERNETES_SPEC_WITH_PRERES = KubernetesDriverSpec(
+    SparkPod(BUILT_DRIVER_POD, BUILT_DRIVER_CONTAINER),
+    PRE_RESOURCES,
+    ADDITIONAL_RESOURCES,
+    RESOLVED_JAVA_OPTIONS)
 
   private val FULL_EXPECTED_CONTAINER = new ContainerBuilder(BUILT_DRIVER_CONTAINER)
     .addNewEnv()
       .withName(ENV_SPARK_CONF_DIR)
@@ -119,6 +128,20 @@
       .build()
   }
 
+  private val PRE_ADDITIONAL_RESOURCES_WITH_OWNER_REFERENCES = PRE_RESOURCES.map { crd =>
+    new CustomResourceDefinitionBuilder(crd)
+      .editMetadata()
+        .addNewOwnerReference()
+          .withName(POD_NAME)
+          .withApiVersion(DRIVER_POD_API_VERSION)
+          .withKind(DRIVER_POD_KIND)
+          .withController(true)
+          .withUid(DRIVER_POD_UID)
+          .endOwnerReference()
+        .endMetadata()
+      .build()
+  }
+
   @Mock
   private var kubernetesClient: KubernetesClient = _
 
@@ -193,6 +216,52 @@
     assert(configMap.getData.get(SPARK_CONF_FILE_NAME).contains("conf2key=conf2value"))
   }
 
+  test("The client should create Kubernetes resources with pre resources") {
+    val sparkConf = new SparkConf(false)
+      .set(Config.CONTAINER_IMAGE, "spark-executor:latest")
+      .set(Config.KUBERNETES_DRIVER_POD_FEATURE_STEPS.key,
+        "org.apache.spark.deploy.k8s.TestStepTwo," +
+        "org.apache.spark.deploy.k8s.TestStep")
+    val preResKconf: KubernetesDriverConf = KubernetesTestConf.createDriverConf(
+      sparkConf = sparkConf,
+      resourceNamePrefix = Some(KUBERNETES_RESOURCE_PREFIX)
+    )
+
+    when(driverBuilder.buildFromFeatures(preResKconf, kubernetesClient))
+      .thenReturn(BUILT_KUBERNETES_SPEC_WITH_PRERES)
+    val submissionClient = new Client(
+      preResKconf,
+      driverBuilder,
+      kubernetesClient,
+      loggingPodStatusWatcher)
+    submissionClient.run()
+    val otherCreatedResources = createdResourcesArgumentCaptor.getAllValues
+
+    // 2 for pre-resource creation/update, 1 for resource creation, 1 for config map
+    assert(otherCreatedResources.size === 4)
+    val preRes = otherCreatedResources.toArray
+      .filter(_.isInstanceOf[CustomResourceDefinition]).toSeq
+
+    // Make sure the pre-resources are created/updated with the expected owner references
+    assert(preRes.size === 2)
+    assert(preRes.last === PRE_ADDITIONAL_RESOURCES_WITH_OWNER_REFERENCES.head)
+
+    // Make sure the handling of the original resources and the config map is not affected
+    val secrets = otherCreatedResources.toArray.filter(_.isInstanceOf[Secret]).toSeq
+    assert(secrets === ADDITIONAL_RESOURCES_WITH_OWNER_REFERENCES)
+    val configMaps = otherCreatedResources.toArray
+      .filter(_.isInstanceOf[ConfigMap]).map(_.asInstanceOf[ConfigMap])
+    assert(secrets.nonEmpty)
+    assert(configMaps.nonEmpty)
+    val configMap = configMaps.head
+    assert(configMap.getMetadata.getName ===
+      KubernetesClientUtils.configMapNameDriver)
+    assert(configMap.getImmutable())
+    assert(configMap.getData.containsKey(SPARK_CONF_FILE_NAME))
+    assert(configMap.getData.get(SPARK_CONF_FILE_NAME).contains("conf1key=conf1value"))
+    assert(configMap.getData.get(SPARK_CONF_FILE_NAME).contains("conf2key=conf2value"))
+  }
+
   test("All files from SPARK_CONF_DIR, " +
     "except templates, spark config, binary files and are within size limit, " +
     "should be populated to pod's configMap.") {
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
index f9802ff967f8..81b23e66fd97 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
@@ -16,10 +16,13 @@
  */
 package org.apache.spark.deploy.k8s.submit
 
+import io.fabric8.kubernetes.api.model.HasMetadata
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinitionBuilder
 import io.fabric8.kubernetes.client.KubernetesClient
 
 import org.apache.spark.SparkConf
 import org.apache.spark.deploy.k8s._
+import org.apache.spark.deploy.k8s.features.KubernetesFeatureConfigStep
 import org.apache.spark.internal.config.ConfigEntry
 
 class KubernetesDriverBuilderSuite extends PodBuilderSuite {
@@ -36,4 +39,46 @@ class KubernetesDriverBuilderSuite extends PodBuilderSuite {
     val conf = KubernetesTestConf.createDriverConf(sparkConf = sparkConf)
     new KubernetesDriverBuilder().buildFromFeatures(conf, client).pod
   }
+
+  private val ADDITION_PRE_RESOURCES = Seq(
+    new CustomResourceDefinitionBuilder().withNewMetadata().withName("preCRD").endMetadata().build()
+  )
+
+  test("check driver pre kubernetes resource, empty by default") {
+    val sparkConf = new SparkConf(false)
+      .set(Config.CONTAINER_IMAGE, "spark-driver:latest")
+    val client = mockKubernetesClient()
+    val conf = KubernetesTestConf.createDriverConf(sparkConf)
+    val spec = new KubernetesDriverBuilder().buildFromFeatures(conf, client)
+    assert(spec.driverPreKubernetesResources.size === 0)
+  }
+
+  test("check driver pre kubernetes resource, pre kuberenetes resource") {
+    val sparkConf = new SparkConf(false)
+      .set(Config.CONTAINER_IMAGE, "spark-driver:latest")
+      .set(Config.KUBERNETES_DRIVER_POD_FEATURE_STEPS.key,
+        "org.apache.spark.deploy.k8s.submit.TestStep")
+    val client = mockKubernetesClient()
+    val conf = KubernetesTestConf.createDriverConf(
+      sparkConf = sparkConf
+    )
+    val spec = new KubernetesDriverBuilder().buildFromFeatures(conf, client)
+    assert(spec.driverPreKubernetesResources.size === 1)
+    assert(spec.driverPreKubernetesResources === ADDITION_PRE_RESOURCES)
+  }
 }
+
+class TestStep extends KubernetesFeatureConfigStep {
+
+  override def configurePod(pod: SparkPod): SparkPod = {
+    pod
+  }
+
+  override def getAdditionalPreKubernetesResources(): Seq[HasMetadata] = Seq(
+    new CustomResourceDefinitionBuilder()
+      .withNewMetadata()
+      .withName("preCRD")
+      .endMetadata()
+      .build()
+  )
+}

From 2613a924312bc89161e06ee40c4efc0dd99f9d7e Mon Sep 17 00:00:00 2001
From: Yikun Jiang
Date: Fri, 17 Dec 2021 11:16:29 +0800
Subject: [PATCH 3/3] Address nits

---
 .../spark/deploy/k8s/submit/KubernetesClientApplication.scala | 2 ++
 .../org/apache/spark/deploy/k8s/submit/ClientSuite.scala      | 2 +-
 .../deploy/k8s/submit/KubernetesDriverBuilderSuite.scala      | 4 ++--
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
index eb32c2c23500..96c19bbb3da6 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
@@ -139,6 +139,8 @@ private[spark] class Client(
       kubernetesClient.resourceList(preKubernetesResources: _*).createOrReplace()
     } catch {
       case NonFatal(e) =>
+        logError("Please check \"kubectl auth can-i create [resource]\" first." +
+          " It should be yes. And please also check your feature step implementation.")
         kubernetesClient.resourceList(preKubernetesResources: _*).delete()
         throw e
     }
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
index 4fe1df4314fb..bd4a78b3bdf9 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
@@ -216,7 +216,7 @@ class ClientSuite extends SparkFunSuite with BeforeAndAfter {
     assert(configMap.getData.get(SPARK_CONF_FILE_NAME).contains("conf2key=conf2value"))
   }
 
-  test("The client should create Kubernetes resources with pre resources") {
+  test("SPARK-37331: The client should create Kubernetes resources with pre resources") {
     val sparkConf = new SparkConf(false)
       .set(Config.CONTAINER_IMAGE, "spark-executor:latest")
       .set(Config.KUBERNETES_DRIVER_POD_FEATURE_STEPS.key,
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
index 81b23e66fd97..8bf43d909dee 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilderSuite.scala
@@ -44,7 +44,7 @@ class KubernetesDriverBuilderSuite extends PodBuilderSuite {
     new CustomResourceDefinitionBuilder().withNewMetadata().withName("preCRD").endMetadata().build()
   )
 
-  test("check driver pre kubernetes resource, empty by default") {
+  test("SPARK-37331: check driver pre kubernetes resource, empty by default") {
     val sparkConf = new SparkConf(false)
       .set(Config.CONTAINER_IMAGE, "spark-driver:latest")
     val client = mockKubernetesClient()
@@ -53,7 +53,7 @@ class KubernetesDriverBuilderSuite extends PodBuilderSuite {
     val spec = new KubernetesDriverBuilder().buildFromFeatures(conf, client)
     assert(spec.driverPreKubernetesResources.size === 0)
   }
 
-  test("check driver pre kubernetes resource, pre kuberenetes resource") {
+  test("SPARK-37331: check driver pre kubernetes resource as expected") {
     val sparkConf = new SparkConf(false)
      .set(Config.CONTAINER_IMAGE, "spark-driver:latest")
      .set(Config.KUBERNETES_DRIVER_POD_FEATURE_STEPS.key,
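
Taken together, the three patches give every feature step a hook that runs before the driver pod exists: whatever getAdditionalPreKubernetesResources() returns is created with createOrReplace() ahead of the pod, and then re-applied with the driver pod as owner reference once the pod has been created. For illustration only, a user-supplied step wired in through Config.KUBERNETES_DRIVER_POD_FEATURE_STEPS (spark.kubernetes.driver.pod.featureSteps, the same key the new tests set) might look like the minimal sketch below. The package, class name, and the ConfigMap it returns are hypothetical; KubernetesFeatureConfigStep, SparkPod, and getAdditionalPreKubernetesResources() come from the patches themselves.

// Minimal sketch of a custom feature step; not part of the patches above.
// Package, class name, and ConfigMap contents are hypothetical.
package org.example.steps

import io.fabric8.kubernetes.api.model.{ConfigMapBuilder, HasMetadata}

import org.apache.spark.deploy.k8s.SparkPod
import org.apache.spark.deploy.k8s.features.KubernetesFeatureConfigStep

class BootstrapConfigStep extends KubernetesFeatureConfigStep {

  // This step leaves the driver pod itself untouched; it only contributes resources.
  override def configurePod(pod: SparkPod): SparkPod = pod

  // Resources returned here are created before the driver pod is submitted and are
  // re-applied with the driver pod as their owner reference afterwards, so they are
  // garbage-collected together with the driver.
  override def getAdditionalPreKubernetesResources(): Seq[HasMetadata] = Seq(
    new ConfigMapBuilder()
      .withNewMetadata()
        .withName("driver-bootstrap-config") // hypothetical name
        .endMetadata()
      .addToData("bootstrap.conf", "key=value")
      .build())
}

Enabling such a step is just a matter of setting spark.kubernetes.driver.pod.featureSteps to the fully qualified class name, exactly as the new ClientSuite and KubernetesDriverBuilderSuite tests do with their TestStep classes; the Client then takes care of the create-before-pod and refresh-after-pod sequencing shown in patches 1 and 2.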