diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index fb232101114b..099875771545 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -82,7 +82,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
   var driverCores: String = null
   var submissionToKill: String = null
   var submissionToRequestStatusFor: String = null
-  var useRest: Boolean = true // used internally
+  var useRest: Boolean = false // used internally
 
   /** Default properties present in the currently defined defaults file. */
   lazy val defaultSparkProperties: HashMap[String, String] = {
@@ -115,6 +115,8 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
   // Use `sparkProperties` map along with env vars to fill in any missing parameters
   loadEnvironmentArguments()
 
+  useRest = sparkProperties.getOrElse("spark.master.rest.enabled", "false").toBoolean
+
   validateArguments()
 
   /**
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
index 2c78c15773af..e1184248af46 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
@@ -121,10 +121,18 @@ private[deploy] class Master(
   }
 
   // Alternative application submission gateway that is stable across Spark versions
-  private val restServerEnabled = conf.getBoolean("spark.master.rest.enabled", true)
+  private val restServerEnabled = conf.getBoolean("spark.master.rest.enabled", false)
   private var restServer: Option[StandaloneRestServer] = None
   private var restServerBoundPort: Option[Int] = None
 
+  {
+    val authKey = SecurityManager.SPARK_AUTH_SECRET_CONF
+    require(conf.getOption(authKey).isEmpty || !restServerEnabled,
+      s"The RestSubmissionServer does not support authentication via ${authKey}. Either turn " +
+      "off the RestSubmissionServer with spark.master.rest.enabled=false, or do not use " +
+      "authentication.")
+  }
+
   override def onStart(): Unit = {
     logInfo("Starting Spark master at " + masterUrl)
     logInfo(s"Running Spark version ${org.apache.spark.SPARK_VERSION}")
diff --git a/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionServer.scala b/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionServer.scala
index 3d99d085408c..e59bf3f0eaf4 100644
--- a/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionServer.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionServer.scala
@@ -51,6 +51,7 @@ private[spark] abstract class RestSubmissionServer(
     val host: String,
     val requestedPort: Int,
     val masterConf: SparkConf) extends Logging {
+
   protected val submitRequestServlet: SubmitRequestServlet
   protected val killRequestServlet: KillRequestServlet
   protected val statusRequestServlet: StatusRequestServlet
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index 66ffb1794984..3e76d47608c7 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -174,6 +174,8 @@
 can find the results of the driver from the Mesos Web UI.
 
 To use cluster mode, you must start the `MesosClusterDispatcher` in your cluster via the `sbin/start-mesos-dispatcher.sh` script,
 passing in the Mesos master URL (e.g: mesos://host:5050). This starts the `MesosClusterDispatcher` as a daemon running on the host.
 
+Note that the `MesosClusterDispatcher` does not support authentication. You should ensure that all network access to it is
+protected (port 7077 by default).
 By setting the Mesos proxy config property (requires mesos version >= 1.4), `--conf spark.mesos.proxy.baseURL=http://localhost:5050` when launching the dispatcher, the mesos sandbox URI for each driver is added to the mesos dispatcher UI.
diff --git a/docs/security.md b/docs/security.md
index 1de1d6318939..c8eec730889c 100644
--- a/docs/security.md
+++ b/docs/security.md
@@ -22,7 +22,12 @@
 secrets to be secure.
 
 For other resource managers, `spark.authenticate.secret` must be configured on each of the
 nodes. This secret will be shared by all the daemons and applications, so this deployment configuration is
-not as secure as the above, especially when considering multi-tenant clusters.
+not as secure as the above, especially when considering multi-tenant clusters. In this
+configuration, a user with the secret can effectively impersonate any other user.
+
+The Rest Submission Server and the MesosClusterDispatcher do not support authentication. You should
+ensure that all network access to the REST API & MesosClusterDispatcher (ports 6066 and 7077
+respectively by default) is restricted to hosts that are trusted to submit jobs.
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala
index ccf33e8d4283..64698b55c6bb 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala
@@ -51,6 +51,14 @@ private[mesos] class MesosClusterDispatcher(
     conf: SparkConf)
   extends Logging {
 
+  {
+    // This doesn't support authentication because the RestSubmissionServer doesn't support it.
+    val authKey = SecurityManager.SPARK_AUTH_SECRET_CONF
+    require(conf.getOption(authKey).isEmpty,
+      s"The MesosClusterDispatcher does not support authentication via ${authKey}. It is not " +
+      s"currently possible to run jobs in cluster mode with authentication on.")
+  }
+
   private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
   private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
   logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)
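
The two `require` guards above share one fail-fast pattern: refuse to start a daemon that would expose an unauthenticated REST endpoint while `spark.authenticate.secret` is set. Below is a minimal, self-contained sketch of that pattern; the `RestAuthGuard` object is illustrative, and the config keys are inlined as strings because `SecurityManager` is `private[spark]` and not accessible outside Spark:

```scala
import org.apache.spark.SparkConf

// Illustrative sketch of the guard added to Master and MesosClusterDispatcher.
object RestAuthGuard {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf(loadDefaults = false)
      .set("spark.master.rest.enabled", "true")        // REST gateway opted in
      .set("spark.authenticate.secret", "some-secret") // hypothetical secret
    val restServerEnabled = conf.getBoolean("spark.master.rest.enabled", false)
    // Fail fast at construction time instead of serving unauthenticated requests:
    require(conf.getOption("spark.authenticate.secret").isEmpty || !restServerEnabled,
      "The REST submission gateway does not support authentication; either unset " +
        "spark.authenticate.secret or set spark.master.rest.enabled=false.")
  }
}
```

Run as written, this throws an `IllegalArgumentException`, which is the intended behavior: the conflicting configuration is rejected before any port is bound.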
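On the client side, the same flag now gates `spark-submit`: `useRest` defaults to `false` and is derived from the user's Spark properties, so standalone cluster-mode submissions fall back to the legacy RPC gateway unless the flag is explicitly enabled. A hedged sketch of that derivation, using a plain `Map` in place of the real `sparkProperties` field:

```scala
// Mirrors the line added to SparkSubmitArguments above.
val sparkProperties = Map("spark.master.rest.enabled" -> "true") // hypothetical user conf
val useRest = sparkProperties.getOrElse("spark.master.rest.enabled", "false").toBoolean
// Flag absent -> useRest == false, submit via the legacy RPC gateway.
// Flag "true" -> useRest == true, submit via the REST endpoint (port 6066 by default).
println(s"useRest = $useRest")
```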