
Commit 82b79a7

Fix some style concerns
1 parent 488c535 commit 82b79a7

File tree

6 files changed: +154 -151 lines changed

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/ConfigurationUtils.scala

Lines changed: 7 additions & 5 deletions
@@ -35,16 +35,18 @@ private[spark] object ConfigurationUtils {
   }
 
   def requireBothOrNeitherDefined(
-    opt1: Option[_],
-    opt2: Option[_],
-    errMessageWhenFirstIsMissing: String,
-    errMessageWhenSecondIsMissing: String): Unit = {
+      opt1: Option[_],
+      opt2: Option[_],
+      errMessageWhenFirstIsMissing: String,
+      errMessageWhenSecondIsMissing: String): Unit = {
     requireSecondIfFirstIsDefined(opt1, opt2, errMessageWhenSecondIsMissing)
     requireSecondIfFirstIsDefined(opt2, opt1, errMessageWhenFirstIsMissing)
   }
 
   def requireSecondIfFirstIsDefined(
-      opt1: Option[_], opt2: Option[_], errMessageWhenSecondIsMissing: String): Unit = {
+      opt1: Option[_],
+      opt2: Option[_],
+      errMessageWhenSecondIsMissing: String): Unit = {
     opt1.foreach { _ =>
       require(opt2.isDefined, errMessageWhenSecondIsMissing)
     }
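Note: the two helpers above are easiest to read together. A minimal, self-contained sketch of the contract they enforce (the object name and main method are illustrative, not part of the commit):

object PairwiseOptionChecks {
  // If opt1 is present, opt2 must be present too; otherwise no constraint.
  def requireSecondIfFirstIsDefined(
      opt1: Option[_],
      opt2: Option[_],
      errMessageWhenSecondIsMissing: String): Unit = {
    opt1.foreach { _ =>
      require(opt2.isDefined, errMessageWhenSecondIsMissing)
    }
  }

  // Symmetric check: each side requires the other, so the pair is all-or-nothing.
  def requireBothOrNeitherDefined(
      opt1: Option[_],
      opt2: Option[_],
      errMessageWhenFirstIsMissing: String,
      errMessageWhenSecondIsMissing: String): Unit = {
    requireSecondIfFirstIsDefined(opt1, opt2, errMessageWhenSecondIsMissing)
    requireSecondIfFirstIsDefined(opt2, opt1, errMessageWhenFirstIsMissing)
  }

  def main(args: Array[String]): Unit = {
    // OK: both defined; also OK: both empty.
    requireBothOrNeitherDefined(Some("cert"), Some("key"), "missing cert", "missing key")
    requireBothOrNeitherDefined(None, None, "missing cert", "missing key")
    // Throws IllegalArgumentException("missing key"): cert without key.
    requireBothOrNeitherDefined(Some("cert"), None, "missing cert", "missing key")
  }
}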

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/SparkKubernetesClientFactory.scala

Lines changed: 26 additions & 26 deletions
@@ -49,38 +49,38 @@ private[spark] object SparkKubernetesClientFactory {
       .orElse(maybeServiceAccountToken)
     val oauthTokenValue = sparkConf.getOption(oauthTokenConf)
     ConfigurationUtils.requireNandDefined(
-        oauthTokenFile,
-        oauthTokenValue,
-        s"Cannot specify OAuth token through both a file $oauthTokenFileConf and a" +
-          s" value $oauthTokenConf.")
+      oauthTokenFile,
+      oauthTokenValue,
+      s"Cannot specify OAuth token through both a file $oauthTokenFileConf and a" +
+        s" value $oauthTokenConf.")
 
     val caCertFile = sparkConf
-        .getOption(s"$kubernetesAuthConfPrefix.$CA_CERT_FILE_CONF_SUFFIX")
-        .orElse(maybeServiceAccountCaCert.map(_.getAbsolutePath))
+      .getOption(s"$kubernetesAuthConfPrefix.$CA_CERT_FILE_CONF_SUFFIX")
+      .orElse(maybeServiceAccountCaCert.map(_.getAbsolutePath))
     val clientKeyFile = sparkConf
-        .getOption(s"$kubernetesAuthConfPrefix.$CLIENT_KEY_FILE_CONF_SUFFIX")
+      .getOption(s"$kubernetesAuthConfPrefix.$CLIENT_KEY_FILE_CONF_SUFFIX")
     val clientCertFile = sparkConf
-        .getOption(s"$kubernetesAuthConfPrefix.$CLIENT_CERT_FILE_CONF_SUFFIX")
+      .getOption(s"$kubernetesAuthConfPrefix.$CLIENT_CERT_FILE_CONF_SUFFIX")
     val dispatcher = new Dispatcher(
-        ThreadUtils.newDaemonCachedThreadPool("kubernetes-dispatcher"))
+      ThreadUtils.newDaemonCachedThreadPool("kubernetes-dispatcher"))
     val config = new ConfigBuilder()
-        .withApiVersion("v1")
-        .withMasterUrl(master)
-        .withWebsocketPingInterval(0)
-        .withOption(oauthTokenValue) {
-          (token, configBuilder) => configBuilder.withOauthToken(token)
-        }.withOption(oauthTokenFile) {
-          (file, configBuilder) =>
-            configBuilder.withOauthToken(Files.toString(file, Charsets.UTF_8))
-        }.withOption(caCertFile) {
-          (file, configBuilder) => configBuilder.withCaCertFile(file)
-        }.withOption(clientKeyFile) {
-          (file, configBuilder) => configBuilder.withClientKeyFile(file)
-        }.withOption(clientCertFile) {
-          (file, configBuilder) => configBuilder.withClientCertFile(file)
-        }.withOption(namespace) {
-          (ns, configBuilder) => configBuilder.withNamespace(ns)
-        }.build()
+      .withApiVersion("v1")
+      .withMasterUrl(master)
+      .withWebsocketPingInterval(0)
+      .withOption(oauthTokenValue) {
+        (token, configBuilder) => configBuilder.withOauthToken(token)
+      }.withOption(oauthTokenFile) {
+        (file, configBuilder) =>
+          configBuilder.withOauthToken(Files.toString(file, Charsets.UTF_8))
+      }.withOption(caCertFile) {
+        (file, configBuilder) => configBuilder.withCaCertFile(file)
+      }.withOption(clientKeyFile) {
+        (file, configBuilder) => configBuilder.withClientKeyFile(file)
+      }.withOption(clientCertFile) {
+        (file, configBuilder) => configBuilder.withClientCertFile(file)
+      }.withOption(namespace) {
+        (ns, configBuilder) => configBuilder.withNamespace(ns)
+      }.build()
     val baseHttpClient = HttpClientUtils.createHttpClient(config)
     val httpClientWithCustomDispatcher = baseHttpClient.newBuilder()
       .dispatcher(dispatcher)
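Note: withOption is not part of fabric8's stock ConfigBuilder; it appears to come from a local implicit helper that applies a configuration step only when the Option is non-empty. A dependency-free sketch of that combinator pattern, with illustrative names and assuming the helper behaves the way the call sites suggest:

object OptionBuilderPattern {
  // Generic over any immutable builder type B: apply the step if the Option
  // is defined, otherwise pass the builder through unchanged.
  implicit class OptionConfigurable[B](val builder: B) extends AnyVal {
    def withOption[T](option: Option[T])(configurator: (T, B) => B): B =
      option.map(value => configurator(value, builder)).getOrElse(builder)
  }

  def main(args: Array[String]): Unit = {
    val namespace: Option[String] = Some("spark-jobs")
    val token: Option[String] = None

    // Each step runs only if its Option is defined, so the chain stays linear
    // instead of branching on every optional setting.
    val description = "client"
      .withOption(namespace) { (ns, s) => s"$s ns=$ns" }
      .withOption(token) { (t, s) => s"$s token=$t" }
    println(description) // "client ns=spark-jobs"
  }
}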

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodFactory.scala

Lines changed: 33 additions & 31 deletions
@@ -26,8 +26,10 @@ import org.apache.spark.deploy.k8s.config._
 import org.apache.spark.deploy.k8s.constants._
 import org.apache.spark.util.Utils
 
-// Configures executor pods. Construct one of these with a SparkConf to set up properties that are
-// common across all executors. Then, pass in dynamic parameters into createExecutorPod.
+/**
+ * Configures executor pods. Construct one of these with a SparkConf to set up properties that are
+ * common across all executors. Then, pass in dynamic parameters into createExecutorPod.
+ */
 private[spark] trait ExecutorPodFactory {
   def createExecutorPod(
       executorId: String,
@@ -44,52 +46,52 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
   import ExecutorPodFactoryImpl._
 
   private val executorExtraClasspath = sparkConf.get(
-      org.apache.spark.internal.config.EXECUTOR_CLASS_PATH)
+    org.apache.spark.internal.config.EXECUTOR_CLASS_PATH)
   private val executorJarsDownloadDir = sparkConf.get(INIT_CONTAINER_JARS_DOWNLOAD_LOCATION)
 
   private val executorLabels = ConfigurationUtils.parsePrefixedKeyValuePairs(
-      sparkConf,
-      KUBERNETES_EXECUTOR_LABEL_PREFIX,
-      "executor label")
+    sparkConf,
+    KUBERNETES_EXECUTOR_LABEL_PREFIX,
+    "executor label")
   require(
-      !executorLabels.contains(SPARK_APP_ID_LABEL),
-      s"Custom executor labels cannot contain $SPARK_APP_ID_LABEL as it is reserved for Spark.")
+    !executorLabels.contains(SPARK_APP_ID_LABEL),
+    s"Custom executor labels cannot contain $SPARK_APP_ID_LABEL as it is reserved for Spark.")
   require(
-      !executorLabels.contains(SPARK_EXECUTOR_ID_LABEL),
-      s"Custom executor labels cannot contain $SPARK_EXECUTOR_ID_LABEL as it is reserved for" +
-        s" Spark.")
+    !executorLabels.contains(SPARK_EXECUTOR_ID_LABEL),
+    s"Custom executor labels cannot contain $SPARK_EXECUTOR_ID_LABEL as it is reserved for" +
+      s" Spark.")
 
   private val executorAnnotations =
-      ConfigurationUtils.parsePrefixedKeyValuePairs (
-        sparkConf,
-        KUBERNETES_EXECUTOR_ANNOTATION_PREFIX,
-        "executor annotation")
+    ConfigurationUtils.parsePrefixedKeyValuePairs(
+      sparkConf,
+      KUBERNETES_EXECUTOR_ANNOTATION_PREFIX,
+      "executor annotation")
   private val nodeSelector =
-      ConfigurationUtils.parsePrefixedKeyValuePairs(
-        sparkConf,
-        KUBERNETES_NODE_SELECTOR_PREFIX,
-        "node selector")
+    ConfigurationUtils.parsePrefixedKeyValuePairs(
+      sparkConf,
+      KUBERNETES_NODE_SELECTOR_PREFIX,
+      "node selector")
 
   private val executorDockerImage = sparkConf.get(EXECUTOR_DOCKER_IMAGE)
   private val dockerImagePullPolicy = sparkConf.get(DOCKER_IMAGE_PULL_POLICY)
   private val executorPort = sparkConf.getInt("spark.executor.port", DEFAULT_STATIC_PORT)
   private val blockmanagerPort = sparkConf
-      .getInt("spark.blockmanager.port", DEFAULT_BLOCKMANAGER_PORT)
+    .getInt("spark.blockmanager.port", DEFAULT_BLOCKMANAGER_PORT)
   private val kubernetesDriverPodName = sparkConf
-      .get(KUBERNETES_DRIVER_POD_NAME)
-      .getOrElse(throw new SparkException("Must specify the driver pod name"))
+    .get(KUBERNETES_DRIVER_POD_NAME)
+    .getOrElse(throw new SparkException("Must specify the driver pod name"))
 
   private val executorPodNamePrefix = sparkConf.get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX)
 
   private val executorMemoryMiB = sparkConf.get(org.apache.spark.internal.config.EXECUTOR_MEMORY)
   private val executorMemoryString = sparkConf.get(
-      org.apache.spark.internal.config.EXECUTOR_MEMORY.key,
-      org.apache.spark.internal.config.EXECUTOR_MEMORY.defaultValueString)
+    org.apache.spark.internal.config.EXECUTOR_MEMORY.key,
+    org.apache.spark.internal.config.EXECUTOR_MEMORY.defaultValueString)
 
   private val memoryOverheadMiB = sparkConf
-      .get(KUBERNETES_EXECUTOR_MEMORY_OVERHEAD)
-      .getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryMiB).toInt,
-        MEMORY_OVERHEAD_MIN_MIB))
+    .get(KUBERNETES_EXECUTOR_MEMORY_OVERHEAD)
+    .getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryMiB).toInt,
+      MEMORY_OVERHEAD_MIN_MIB))
   private val executorMemoryWithOverhead = executorMemoryMiB + memoryOverheadMiB
 
   private val executorCores = sparkConf.getDouble("spark.executor.cores", 1d)
@@ -109,10 +111,10 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
     // executorId and applicationId
     val hostname = name.substring(Math.max(0, name.length - 63))
     val resolvedExecutorLabels = Map(
-        SPARK_EXECUTOR_ID_LABEL -> executorId,
-        SPARK_APP_ID_LABEL -> applicationId,
-        SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE) ++
-      executorLabels
+      SPARK_EXECUTOR_ID_LABEL -> executorId,
+      SPARK_APP_ID_LABEL -> applicationId,
+      SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE) ++
+        executorLabels
     val executorMemoryQuantity = new QuantityBuilder(false)
       .withAmount(s"${executorMemoryMiB}Mi")
       .build()
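Note: the memoryOverheadMiB expression above is the usual Spark overhead rule: an explicit setting wins; otherwise take a fraction of executor memory with a fixed floor. A sketch with assumed constants (10% and 384 MiB match Spark's overhead defaults elsewhere, but are not read from this commit):

object ExecutorMemoryMath {
  // Assumed values, mirroring Spark's conventional overhead defaults.
  val MEMORY_OVERHEAD_FACTOR = 0.10
  val MEMORY_OVERHEAD_MIN_MIB = 384L

  // Explicit override if provided, else max(factor * executor memory, floor).
  def overheadMiB(executorMemoryMiB: Long, explicit: Option[Long]): Long =
    explicit.getOrElse(
      math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryMiB).toLong, MEMORY_OVERHEAD_MIN_MIB))

  def main(args: Array[String]): Unit = {
    // 2 GiB executor: 10% is 204 MiB, below the floor, so overhead is 384 MiB.
    println(overheadMiB(2048, None))       // 384
    // 8 GiB executor: 10% is 819 MiB, above the floor.
    println(overheadMiB(8192, None))       // 819
    // An explicit setting wins over the computed value.
    println(overheadMiB(8192, Some(1024))) // 1024
  }
}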

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala

Lines changed: 14 additions & 14 deletions
@@ -41,25 +41,25 @@ private[spark] class KubernetesClusterManager extends ExternalClusterManager wit
     val sparkConf = sc.getConf
 
     val kubernetesClient = SparkKubernetesClientFactory.createKubernetesClient(
-        KUBERNETES_MASTER_INTERNAL_URL,
-        Some(sparkConf.get(KUBERNETES_NAMESPACE)),
-        APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX,
-        sparkConf,
-        Some(new File(Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH)),
-        Some(new File(Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH)))
+      KUBERNETES_MASTER_INTERNAL_URL,
+      Some(sparkConf.get(KUBERNETES_NAMESPACE)),
+      APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX,
+      sparkConf,
+      Some(new File(Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH)),
+      Some(new File(Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH)))
 
     val executorPodFactory = new ExecutorPodFactoryImpl(sparkConf)
     val allocatorExecutor = ThreadUtils
-        .newDaemonSingleThreadScheduledExecutor("kubernetes-pod-allocator")
+      .newDaemonSingleThreadScheduledExecutor("kubernetes-pod-allocator")
     val requestExecutorsService = ThreadUtils.newDaemonCachedThreadPool(
-        "kubernetes-executor-requests")
+      "kubernetes-executor-requests")
     new KubernetesClusterSchedulerBackend(
-        scheduler.asInstanceOf[TaskSchedulerImpl],
-        sc.env.rpcEnv,
-        executorPodFactory,
-        kubernetesClient,
-        allocatorExecutor,
-        requestExecutorsService)
+      scheduler.asInstanceOf[TaskSchedulerImpl],
+      sc.env.rpcEnv,
+      executorPodFactory,
+      kubernetesClient,
+      allocatorExecutor,
+      requestExecutorsService)
   }
 
   override def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
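Note: this file's hunk is pure re-indentation. My reading, based on Spark's Scala style guide, is that the commit normalizes two rules; an illustrative sketch (not code from the diff):

class Example {
  // Method declarations: parameters on their own lines, indented 4 spaces.
  def createClient(
      masterUrl: String,
      namespace: Option[String]): String = {
    // Call sites: wrapped arguments indented 2 spaces.
    buildDescription(
      masterUrl,
      namespace.getOrElse("default"))
  }

  private def buildDescription(masterUrl: String, namespace: String): String =
    s"$masterUrl/$namespace"
}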

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala

Lines changed: 16 additions & 16 deletions
@@ -65,7 +65,7 @@ private[spark] class KubernetesClusterSchedulerBackend(
     .getOrElse(
       throw new SparkException("Must specify the driver pod name"))
   private implicit val requestExecutorContext = ExecutionContext.fromExecutorService(
-      requestExecutorsService)
+    requestExecutorsService)
 
   private val driverPod = try {
     kubernetesClient.pods()
@@ -89,9 +89,9 @@ private[spark] class KubernetesClusterSchedulerBackend(
   protected var totalExpectedExecutors = new AtomicInteger(0)
 
   private val driverUrl = RpcEndpointAddress(
-      conf.get("spark.driver.host"),
-      conf.getInt("spark.driver.port", DEFAULT_DRIVER_PORT),
-      CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
+    conf.get("spark.driver.host"),
+    conf.getInt("spark.driver.port", DEFAULT_DRIVER_PORT),
+    CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
 
   private val initialExecutors = getInitialTargetExecutorNumber()
 
@@ -121,7 +121,7 @@ private[spark] class KubernetesClusterSchedulerBackend(
     } else {
       val nodeToLocalTaskCount = getNodesWithLocalTaskCounts
       for (i <- 0 until math.min(
-          totalExpectedExecutors.get - runningExecutorsToPods.size, podAllocationSize)) {
+        totalExpectedExecutors.get - runningExecutorsToPods.size, podAllocationSize)) {
         val (executorId, pod) = allocateNewExecutorPod(nodeToLocalTaskCount)
         runningExecutorsToPods.put(executorId, pod)
         runningPodsToExecutors.put(pod.getMetadata.getName, executorId)
@@ -202,13 +202,13 @@ private[spark] class KubernetesClusterSchedulerBackend(
   override def start(): Unit = {
     super.start()
     executorWatchResource.set(
-        kubernetesClient
-          .pods()
-          .withLabel(SPARK_APP_ID_LABEL, applicationId())
-          .watch(new ExecutorPodsWatcher()))
+      kubernetesClient
+        .pods()
+        .withLabel(SPARK_APP_ID_LABEL, applicationId())
+        .watch(new ExecutorPodsWatcher()))
 
     allocatorExecutor.scheduleWithFixedDelay(
-        allocatorRunnable, 0L, podAllocationInterval, TimeUnit.SECONDS)
+      allocatorRunnable, 0L, podAllocationInterval, TimeUnit.SECONDS)
 
     if (!Utils.isDynamicAllocationEnabled(conf)) {
       doRequestTotalExecutors(initialExecutors)
@@ -281,12 +281,12 @@ private[spark] class KubernetesClusterSchedulerBackend(
   private def allocateNewExecutorPod(nodeToLocalTaskCount: Map[String, Int]): (String, Pod) = {
     val executorId = EXECUTOR_ID_COUNTER.incrementAndGet().toString
     val executorPod = executorPodFactory.createExecutorPod(
-        executorId,
-        applicationId(),
-        driverUrl,
-        conf.getExecutorEnv,
-        driverPod,
-        nodeToLocalTaskCount)
+      executorId,
+      applicationId(),
+      driverUrl,
+      conf.getExecutorEnv,
+      driverPod,
+      nodeToLocalTaskCount)
     try {
       (executorId, kubernetesClient.pods.create(executorPod))
     } catch {
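Note: the allocator hunk above caps how many pods each scheduling tick creates. Its arithmetic, extracted into a standalone sketch (names illustrative):

object AllocatorBatching {
  // Each tick creates at most podAllocationSize pods, and never more than the
  // remaining shortfall. The original uses `0 until math.min(...)`, where a
  // negative bound yields an empty range; max(0, ...) makes that explicit here.
  def podsToCreate(totalExpected: Int, running: Int, podAllocationSize: Int): Int =
    math.max(0, math.min(totalExpected - running, podAllocationSize))

  def main(args: Array[String]): Unit = {
    println(podsToCreate(totalExpected = 10, running = 0, podAllocationSize = 5))  // 5
    println(podsToCreate(totalExpected = 10, running = 8, podAllocationSize = 5))  // 2
    println(podsToCreate(totalExpected = 10, running = 10, podAllocationSize = 5)) // 0
  }
}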
