core/src/main/scala/org/apache/spark/SparkContext.scala (3 additions, 7 deletions)
@@ -48,20 +48,20 @@ import org.apache.mesos.MesosNativeLibrary
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
-import org.apache.spark.executor.{ExecutorEndpoint, TriggerThreadDump}
 import org.apache.spark.input.{StreamInputFormat, PortableDataStream, WholeTextFileInputFormat,
   FixedLengthBinaryInputFormat}
 import org.apache.spark.io.CompressionCodec
 import org.apache.spark.metrics.MetricsSystem
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
 import org.apache.spark.rdd._
-import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef}
+import org.apache.spark.rpc.RpcEndpointRef
 import org.apache.spark.scheduler._
 import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend,
   SparkDeploySchedulerBackend, SimrSchedulerBackend}
 import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
 import org.apache.spark.scheduler.local.LocalBackend
 import org.apache.spark.storage._
+import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
 import org.apache.spark.ui.{SparkUI, ConsoleProgressBar}
 import org.apache.spark.ui.jobs.JobProgressListener
 import org.apache.spark.util._
@@ -619,11 +619,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationClient
       if (executorId == SparkContext.DRIVER_IDENTIFIER) {
         Some(Utils.getThreadDump())
       } else {
-        val (host, port) = env.blockManager.master.getRpcHostPortForExecutor(executorId).get
-        val endpointRef = env.rpcEnv.setupEndpointRef(
-          SparkEnv.executorActorSystemName,
-          RpcAddress(host, port),
-          ExecutorEndpoint.EXECUTOR_ENDPOINT_NAME)
+        val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get
         Some(endpointRef.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump))
       }
     } catch {
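Read together, the two hunks above collapse the old three-step lookup (resolve the executor's host and port, rebuild an RpcAddress, set up a fresh endpoint ref by name) into a single ask on the block manager master. A minimal sketch of the resulting driver-side flow; threadDumpFor is a hypothetical helper name, everything else comes from the patch:

```scala
// Sketch of the driver-side path after this patch (error handling elided).
// TriggerThreadDump is now the BlockManagerMessages variant, not the one
// from the deleted executor-side ExecutorEndpoint file.
def threadDumpFor(env: SparkEnv, executorId: String): Option[Array[ThreadStackTrace]] = {
  if (executorId == SparkContext.DRIVER_IDENTIFIER) {
    Some(Utils.getThreadDump())  // the driver can dump its own threads locally
  } else {
    // One RPC to the master yields the executor's slave endpoint ref;
    // one ask on that ref returns the stack traces.
    env.blockManager.master.getExecutorEndpointRef(executorId).map { ref =>
      ref.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump)
    }
  }
}
```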
core/src/main/scala/org/apache/spark/executor/Executor.scala (0 additions, 5 deletions)
@@ -85,10 +85,6 @@ private[spark] class Executor(
     env.blockManager.initialize(conf.getAppId)
   }

-  // Create an RpcEndpoint for receiving RPCs from the driver
-  private val executorEndpoint = env.rpcEnv.setupEndpoint(
-    ExecutorEndpoint.EXECUTOR_ENDPOINT_NAME, new ExecutorEndpoint(env.rpcEnv, executorId))
-
   // Whether to load classes in user jars before those in Spark jars
   private val userClassPathFirst = conf.getBoolean("spark.executor.userClassPathFirst", false)

@@ -136,7 +132,6 @@

   def stop(): Unit = {
     env.metricsSystem.report()
-    env.rpcEnv.stop(executorEndpoint)
     heartbeater.shutdown()
     heartbeater.awaitTermination(10, TimeUnit.SECONDS)
     threadPool.shutdown()

core/src/main/scala/org/apache/spark/executor/ExecutorEndpoint.scala (this file was deleted; it held the org.apache.spark.executor.ExecutorEndpoint class named in the MimaExcludes entries below)
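For context, a rough reconstruction of the deleted endpoint's shape, inferred from the call sites removed above rather than copied from the original file:

```scala
// Approximate shape of the removed executor-side endpoint; inferred from
// its call sites in SparkContext and Executor, so details may differ.
private[spark] class ExecutorEndpoint(override val rpcEnv: RpcEnv, executorId: String)
  extends RpcEndpoint {

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case TriggerThreadDump =>
      context.reply(Utils.getThreadDump())
  }
}

private[spark] object ExecutorEndpoint {
  val EXECUTOR_ENDPOINT_NAME = "ExecutorEndpoint"
}

// The old TriggerThreadDump message presumably lived in this package too,
// since SparkContext imported it from org.apache.spark.executor.
case object TriggerThreadDump
```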

core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
@@ -87,8 +87,8 @@ class BlockManagerMaster(
     driverEndpoint.askWithRetry[Seq[BlockManagerId]](GetPeers(blockManagerId))
   }

-  def getRpcHostPortForExecutor(executorId: String): Option[(String, Int)] = {
-    driverEndpoint.askWithRetry[Option[(String, Int)]](GetRpcHostPortForExecutor(executorId))
+  def getExecutorEndpointRef(executorId: String): Option[RpcEndpointRef] = {
+    driverEndpoint.askWithRetry[Option[RpcEndpointRef]](GetExecutorEndpointRef(executorId))
   }

   /**
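getExecutorEndpointRef returns None when the executor is not (or no longer) registered with the master, so callers can match on the Option instead of repeating SparkContext's .get-inside-try pattern. A hedged usage sketch; master is assumed to be an in-scope BlockManagerMaster and logWarning to come from the Logging trait:

```scala
// Hypothetical caller-side handling of an unknown or departed executor.
master.getExecutorEndpointRef(executorId) match {
  case Some(ref) =>
    val dump = ref.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump)
    dump.foreach(trace => println(trace.threadName))
  case None =>
    logWarning(s"Executor $executorId is not registered with the block manager master")
}
```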
core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
@@ -19,7 +19,6 @@ package org.apache.spark.storage

 import java.util.{HashMap => JHashMap}

-import scala.collection.immutable.HashSet
 import scala.collection.mutable
 import scala.collection.JavaConverters._
 import scala.concurrent.{ExecutionContext, Future}
@@ -75,8 +74,8 @@ class BlockManagerMasterEndpoint(
     case GetPeers(blockManagerId) =>
       context.reply(getPeers(blockManagerId))

-    case GetRpcHostPortForExecutor(executorId) =>
-      context.reply(getRpcHostPortForExecutor(executorId))
+    case GetExecutorEndpointRef(executorId) =>
+      context.reply(getExecutorEndpointRef(executorId))

     case GetMemoryStatus =>
       context.reply(memoryStatus)
@@ -388,15 +387,14 @@ class BlockManagerMasterEndpoint(
   }

   /**
-   * Returns the hostname and port of an executor, based on the [[RpcEnv]] address of its
-   * [[BlockManagerSlaveEndpoint]].
+   * Returns an [[RpcEndpointRef]] of the [[BlockManagerSlaveEndpoint]] for sending RPC messages.
    */
-  private def getRpcHostPortForExecutor(executorId: String): Option[(String, Int)] = {
+  private def getExecutorEndpointRef(executorId: String): Option[RpcEndpointRef] = {
     for (
       blockManagerId <- blockManagerIdByExecutor.get(executorId);
       info <- blockManagerInfo.get(blockManagerId)
     ) yield {
-      (info.slaveEndpoint.address.host, info.slaveEndpoint.address.port)
+      info.slaveEndpoint
     }
   }

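The for-comprehension chains two Option lookups (executor id to BlockManagerId, then BlockManagerId to its registered BlockManagerInfo) and desugars to flatMap/map, so a miss at either step yields None. The equivalent spelled out explicitly:

```scala
// Equivalent desugaring of the for-comprehension in getExecutorEndpointRef.
private def getExecutorEndpointRef(executorId: String): Option[RpcEndpointRef] = {
  blockManagerIdByExecutor.get(executorId).flatMap { blockManagerId =>
    blockManagerInfo.get(blockManagerId).map(_.slaveEndpoint)
  }
}
```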
core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala
@@ -42,6 +42,11 @@ private[spark] object BlockManagerMessages {
   case class RemoveBroadcast(broadcastId: Long, removeFromDriver: Boolean = true)
     extends ToBlockManagerSlave

+  /**
+   * Driver -> Executor message to trigger a thread dump.
+   */
+  case object TriggerThreadDump extends ToBlockManagerSlave
+
   //////////////////////////////////////////////////////////////////////////////////
   // Messages from slaves to the master.
   //////////////////////////////////////////////////////////////////////////////////
@@ -90,7 +95,7 @@ private[spark] object BlockManagerMessages {

   case class GetPeers(blockManagerId: BlockManagerId) extends ToBlockManagerMaster

-  case class GetRpcHostPortForExecutor(executorId: String) extends ToBlockManagerMaster
+  case class GetExecutorEndpointRef(executorId: String) extends ToBlockManagerMaster

   case class RemoveExecutor(execId: String) extends ToBlockManagerMaster

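The marker traits double as routing documentation: ToBlockManagerSlave messages are handled by each executor's BlockManagerSlaveEndpoint, ToBlockManagerMaster messages by the driver's BlockManagerMasterEndpoint. Extending the slave-side protocol follows the same three steps this patch takes; a purely hypothetical example (GetDiskUsage is invented for illustration):

```scala
// 1. Declare the message next to the other ToBlockManagerSlave messages.
case object GetDiskUsage extends ToBlockManagerSlave

// 2. Handle it in BlockManagerSlaveEndpoint.receiveAndReply:
//      case GetDiskUsage =>
//        context.reply(blockManager.diskBlockManager.getAllBlocks().length)

// 3. Send it from the driver through the ref the master hands out:
//      master.getExecutorEndpointRef(execId)
//        .map(_.askWithRetry[Int](GetDiskUsage))
```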
core/src/main/scala/org/apache/spark/storage/BlockManagerSlaveEndpoint.scala
@@ -19,10 +19,10 @@ package org.apache.spark.storage

 import scala.concurrent.{ExecutionContext, Future}

-import org.apache.spark.rpc.{ThreadSafeRpcEndpoint, RpcEnv, RpcCallContext, RpcEndpoint}
-import org.apache.spark.util.ThreadUtils
 import org.apache.spark.{Logging, MapOutputTracker, SparkEnv}
+import org.apache.spark.rpc.{RpcCallContext, RpcEnv, ThreadSafeRpcEndpoint}
 import org.apache.spark.storage.BlockManagerMessages._
+import org.apache.spark.util.{ThreadUtils, Utils}

 /**
  * An RpcEndpoint to take commands from the master to execute options. For example,
@@ -70,6 +70,9 @@ class BlockManagerSlaveEndpoint(

     case GetMatchingBlockIds(filter, _) =>
       context.reply(blockManager.getMatchingBlockIds(filter))
+
+    case TriggerThreadDump =>
+      context.reply(Utils.getThreadDump())
   }

   private def doAsync[T](actionMessage: String, context: RpcCallContext)(body: => T) {
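Note the new case replies inline rather than going through doAsync like the block-removal handlers: Utils.getThreadDump() is a quick in-process call, so there is no blocking work to push onto the futures pool. Had it needed the async path, the handler would look roughly like this (a sketch for contrast, not part of the patch):

```scala
// Hypothetical async variant, shown only for contrast with the
// synchronous reply the patch actually uses.
case TriggerThreadDump =>
  doAsync[Array[ThreadStackTrace]]("triggering thread dump", context) {
    Utils.getThreadDump()
  }
```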
project/MimaExcludes.scala (8 additions, 0 deletions)
@@ -147,6 +147,14 @@ object MimaExcludes {
         // SPARK-4557 Changed foreachRDD to use VoidFunction
         ProblemFilters.exclude[MissingMethodProblem](
           "org.apache.spark.streaming.api.java.JavaDStreamLike.foreachRDD")
+      ) ++ Seq(
+        // SPARK-11996 Make the executor thread dump work again
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint$"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor$")
       )
     case v if v.startsWith("1.5") =>
       Seq(
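MiMa checks published bytecode, where Scala's private[spark] access does not exist, so removed classes must be excluded by name; each removal needs a pair of MissingClassProblem filters, one for the class file and one for its $-suffixed companion/module class. The same pairing would apply to any future removal of a nested message type, for example (SomeRemovedMessage is a made-up name):

```scala
// Hypothetical exclusion pair for some other removed nested message type.
ProblemFilters.exclude[MissingClassProblem](
  "org.apache.spark.storage.BlockManagerMessages$SomeRemovedMessage"),
ProblemFilters.exclude[MissingClassProblem](
  "org.apache.spark.storage.BlockManagerMessages$SomeRemovedMessage$")
```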