diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 58d3b846fd80d..f023e4b21cb40 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -621,7 +621,7 @@ private[spark] object SparkConf extends Logging {
   /**
    * Return whether the given config should be passed to an executor on start-up.
    *
-   * Certain akka and authentication configs are required of the executor when it connects to
+   * Certain akka and authentication configs are required from the executor when it connects to
    * the scheduler, while the rest of the spark configs can be inherited from the driver later.
    */
  def isExecutorStartupConf(name: String): Boolean = {
diff --git a/core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala b/core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala
index 48afe3ae3511f..fdf76d312db3b 100644
--- a/core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala
@@ -197,7 +197,7 @@ private[spark] class MetricsSystem private (
         }
       } catch {
         case e: Exception => {
-          logError("Sink class " + classPath + " cannot be instantialized")
+          logError("Sink class " + classPath + " cannot be instantiated")
           throw e
         }
       }
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala
index f25f3ed0d9037..cb9a3008107d7 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala
@@ -22,7 +22,8 @@ import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.storage.BlockManagerId
 
 /**
- * Low-level task scheduler interface, currently implemented exclusively by TaskSchedulerImpl.
+ * Low-level task scheduler interface, currently implemented exclusively by
+ * [[org.apache.spark.scheduler.TaskSchedulerImpl]].
  * This interface allows plugging in different task schedulers. Each TaskScheduler schedules tasks
  * for a single SparkContext. These schedulers get sets of tasks submitted to them from the
  * DAGScheduler for each stage, and are responsible for sending the tasks to the cluster, running
diff --git a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
index 15e7519d708c6..53283448c87b1 100644
--- a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
+++ b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
@@ -80,7 +80,7 @@ private[spark] object ThreadUtils {
   }
 
   /**
-   * Wrapper over newSingleThreadScheduledExecutor.
+   * Wrapper over ScheduledThreadPoolExecutor.
    */
  def newDaemonSingleThreadScheduledExecutor(threadName: String): ScheduledExecutorService = {
    val threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadName).build()
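
For context on the last hunk: the corrected comment reflects that `newDaemonSingleThreadScheduledExecutor` constructs a `ScheduledThreadPoolExecutor` directly (with a pool size of 1), rather than going through `Executors.newSingleThreadScheduledExecutor`. Below is a minimal standalone sketch of that pattern; the object name, thread name, and demo task are illustrative, not part of the patch, and Spark's own `ThreadUtils` is `private[spark]`, so this replicates it rather than calling it. Guava is assumed on the classpath for `ThreadFactoryBuilder`.

```scala
import java.util.concurrent.{ScheduledThreadPoolExecutor, TimeUnit}

import com.google.common.util.concurrent.ThreadFactoryBuilder

object DaemonSchedulerSketch {
  // Build a single-threaded ScheduledThreadPoolExecutor backed by a named
  // daemon thread, mirroring the pattern in ThreadUtils.
  def newDaemonSingleThreadScheduledExecutor(threadName: String): ScheduledThreadPoolExecutor = {
    val threadFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat(threadName)
      .build()
    new ScheduledThreadPoolExecutor(1, threadFactory)
  }

  def main(args: Array[String]): Unit = {
    val executor = newDaemonSingleThreadScheduledExecutor("sketch-scheduler")
    // Schedule a trivial periodic task on the single daemon thread.
    executor.scheduleAtFixedRate(new Runnable {
      override def run(): Unit = println("tick")
    }, 0L, 1L, TimeUnit.SECONDS)
    Thread.sleep(3000)
    executor.shutdown()
  }
}
```

The daemon flag is the point of the wrapper: these executors back long-lived background tasks, and daemon threads do not keep the JVM alive, so a forgotten `shutdown()` cannot block process exit.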