
Commit 76714e7

Update blocklist to excluded

1 parent 1a3237a commit 76714e7

File tree

4 files changed: +7 -6 lines changed


core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala

Lines changed: 2 additions & 2 deletions

@@ -39,7 +39,7 @@ import org.apache.spark.util.{Clock, SystemClock, Utils}
  *  * many small stages -- this may prevent a bad executor for having many failures within one
  *    stage, but still many failures over the entire application
  *  * "flaky" executors -- they don't fail every task, but are still faulty enough to merit
- *    blocking
+ *    excluding
  *
  * See the design doc on SPARK-8425 for a more in-depth discussion.
  *
@@ -162,7 +162,7 @@ private[scheduler] class HealthTracker (

   private def killExcludedExecutor(exec: String): Unit = {
     if (conf.get(config.EXCLUDE_ON_FAILURE_KILL_ENABLED)) {
-      killExecutor(exec, s"Killing blocked executor id $exec since " +
+      killExecutor(exec, s"Killing excluded executor id $exec since " +
         s"${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.")
     }
   }
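
The kill path above is gated by config.EXCLUDE_ON_FAILURE_KILL_ENABLED. As a rough illustration (not part of this commit), an application could opt into executor exclusion and this kill-on-exclude behavior through SparkConf; the key names below assume the renamed spark.excludeOnFailure.* config namespace that this terminology change belongs to.

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

// Sketch only: enable failure-based exclusion and the killing of executors that
// get excluded (the branch guarded by EXCLUDE_ON_FAILURE_KILL_ENABLED above).
// Key names assume the post-rename spark.excludeOnFailure.* configs.
val conf = new SparkConf()
  .set("spark.excludeOnFailure.enabled", "true")
  .set("spark.excludeOnFailure.killExcludedExecutors", "true")

val spark = SparkSession.builder()
  .appName("exclude-on-failure-demo")
  .config(conf)
  .getOrCreate()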

resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala

Lines changed: 1 addition & 1 deletion

@@ -63,7 +63,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
     with MesosScheduler
     with MesosSchedulerUtils {

-  // Exclude a agent after this many failures
+  // Exclude an agent after this many failures
   private val MAX_AGENT_FAILURES = 2

   private val maxCoresOption = conf.get(config.CORES_MAX)
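
The constant above expresses a simple threshold rule: stop launching on a Mesos agent once it has accumulated that many failures. A hypothetical, self-contained sketch of that rule (illustrative names, not Spark's backend code):

import scala.collection.mutable

// Hypothetical sketch of the "exclude an agent after N failures" rule; the class
// and method names are made up, not from MesosCoarseGrainedSchedulerBackend.
class AgentFailureTracker(maxFailures: Int = 2) {
  private val failures = mutable.Map.empty[String, Int].withDefaultValue(0)

  // Record one launch failure for the given agent.
  def recordFailure(agentId: String): Unit = failures(agentId) += 1

  // An agent is excluded once it reaches the failure threshold.
  def isExcluded(agentId: String): Boolean = failures(agentId) >= maxFailures
}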

resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala

Lines changed: 3 additions & 2 deletions

@@ -121,7 +121,7 @@ private[spark] class YarnAllocatorNodeHealthTracker(
   }

   private def synchronizeExcludedNodesWithYarn(nodesToExclude: Set[String]): Unit = {
-    // Update excluded node information to YARN ResourceManager for this application,
+    // Update YARN with the nodes that are excluded for this application,
     // in order to avoid allocating new Containers on the problematic nodes.
     val additions = (nodesToExclude -- currentExcludededYarnNodes).toList.sorted
     val removals = (currentExcludededYarnNodes -- nodesToExclude).toList.sorted
@@ -132,7 +132,8 @@
       logInfo(s"removing nodes from YARN application master's excluded node list: $removals")
     }
     if (additions.nonEmpty || removals.nonEmpty) {
-      // TODO - need to update once Hadoop changes -
+      // Note YARNs api for excluding nodes is updateBlacklist.
+      // TODO - We need to update once Hadoop changes -
       //  https://issues.apache.org/jira/browse/HADOOP-17169
       amClient.updateBlacklist(additions.asJava, removals.asJava)
     }
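
The synchronization above pushes only the delta to YARN: it diffs the desired exclude set against what was last reported and sends additions and removals separately. A minimal standalone sketch of that diff step (names are illustrative; the real method then forwards the result to amClient.updateBlacklist):

// Sketch of the diff-and-sync computation; names are illustrative.
def diffExcludedNodes(
    nodesToExclude: Set[String],
    currentlyExcluded: Set[String]): (List[String], List[String]) = {
  val additions = (nodesToExclude -- currentlyExcluded).toList.sorted
  val removals = (currentlyExcluded -- nodesToExclude).toList.sorted
  (additions, removals)
}

// Example: diffExcludedNodes(Set("node1", "node3"), Set("node1", "node2"))
// returns (List("node3"), List("node2")).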

resource-managers/yarn/src/test/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackendSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@ class YarnSchedulerBackendSuite extends SparkFunSuite with MockitoSugar with Loc
       val request = Map(defaultResourceProf -> numRequested)
       val req = yarnSchedulerBackendExtended.prepareRequestExecutors(request)
       assert(req.resourceProfileToTotalExecs(defaultResourceProf) === numRequested)
-      assert(req.nodeBlocklist === excludelist)
+      assert(req.excludedNodes === excludelist)
       val hosts =
         req.hostToLocalTaskCount(ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID).keySet
       assert(hosts.intersect(excludelist).isEmpty)
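
The final assertion checks that none of the hosts the request asks for locality on appears in the exclude list. In isolation the invariant is just a set-intersection check; the values below are made up for illustration:

// Illustrative values only; in the suite these come from prepareRequestExecutors.
val excludelist = Set("host1", "host2")
val requestedHosts = Set("host3", "host4")
assert(requestedHosts.intersect(excludelist).isEmpty)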
