diff --git a/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala b/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
index 117f51c5b8f2..f6b20593462c 100644
--- a/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
@@ -24,7 +24,7 @@ package org.apache.spark.rpc
 private[spark] trait RpcCallContext {
 
   /**
-   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
+   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its `RpcEndpoint.receive`
    * will be called.
    */
   def reply(response: Any): Unit
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
index 84c2ad48f1f2..83f76db7e89d 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
@@ -77,7 +77,7 @@ private[spark] trait UIRoot {
   /**
    * Runs some code with the current SparkUI instance for the app / attempt.
    *
-   * @throws NoSuchElementException If the app / attempt pair does not exist.
+   * @throws java.util.NoSuchElementException If the app / attempt pair does not exist.
    */
   def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T
 
@@ -85,8 +85,8 @@ private[spark] trait UIRoot {
   def getApplicationInfo(appId: String): Option[ApplicationInfo]
 
   /**
-   * Write the event logs for the given app to the [[ZipOutputStream]] instance. If attemptId is
-   * [[None]], event logs for all attempts of this application will be written out.
+   * Write the event logs for the given app to the `ZipOutputStream` instance. If attemptId is
+   * `None`, event logs for all attempts of this application will be written out.
    */
   def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit = {
     Response.serverError()
diff --git a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
index 3bfdf95db84c..8212cb931db5 100644
--- a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
+++ b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
@@ -33,7 +33,7 @@ import org.apache.spark.util.collection.OpenHashSet
 /**
  * A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
  * When a class extends it, [[SizeEstimator]] will query the `estimatedSize` first.
- * If `estimatedSize` does not return [[None]], [[SizeEstimator]] will use the returned size
+ * If `estimatedSize` does not return `None`, [[SizeEstimator]] will use the returned size
  * as the size of the object. Otherwise, [[SizeEstimator]] will do the estimation work.
  * The difference between a [[KnownSizeEstimation]] and
  * [[org.apache.spark.util.collection.SizeTracker]] is that, a
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
index 9facfe8ff2b0..97400c195476 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
@@ -102,8 +102,8 @@ object SparkAWSCredentials {
     *
     * @note The given AWS keypair will be saved in DStream checkpoints if checkpointing is
     * enabled. Make sure that your checkpoint directory is secure. Prefer using the
-    * [[http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default default provider chain]]
-    * instead if possible.
+    * default provider chain instead if possible
+    * (http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default).
     *
     * @param accessKeyId AWS access key ID
     * @param secretKey AWS secret key
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
index 014ff07c2115..2b4b0fc55b95 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -371,7 +371,7 @@ private[ann] trait TopologyModel extends Serializable {
   def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]
 
   /**
-   * Prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return prediction
@@ -379,7 +379,7 @@
   def predict(features: Vector): Vector
 
   /**
-   * Raw prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Raw prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return raw prediction
@@ -389,7 +389,7 @@
   def predictRaw(features: Vector): Vector
 
   /**
-   * Probability of the model. See {@link ProbabilisticClassificationModel}
+   * Probability of the model. See `ProbabilisticClassificationModel`
    *
    * @param rawPrediction raw prediction vector
    * @return probability
diff --git a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
index 1cd2b1ad8409..756dd677cb57 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
@@ -121,7 +121,7 @@ sealed abstract class Attribute extends Serializable {
 private[attribute] trait AttributeFactory {
 
   /**
-   * Creates an [[Attribute]] from a [[Metadata]] instance.
+   * Creates an [[Attribute]] from a `Metadata` instance.
    */
   private[attribute] def fromMetadata(metadata: Metadata): Attribute
 
diff --git a/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala b/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
index 6e885d7c8aec..8167ea68a715 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
@@ -49,7 +49,7 @@ object Correlation {
    *                Supported: `pearson` (default), `spearman`
    * @return A dataframe that contains the correlation matrix of the column of vectors. This
    *         dataframe contains a single row and a single column of name
-   *         '$METHODNAME($COLUMN)'.
+   *         `$METHODNAME($COLUMN)`.
    * @throws IllegalArgumentException if the column is not a valid column in the dataset, or if
    *         the content of this column is not of type Vector.
    *
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
index 00157fe63af4..767c151c8293 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
@@ -40,43 +40,43 @@
 private[ml] trait DecisionTreeParams extends PredictorParams
   with HasCheckpointInterval with HasSeed {
 
   /**
-   * Maximum depth of the tree (>= 0).
+   * Maximum depth of the tree (nonnegative).
    * E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
    * (default = 5)
    * @group param
    */
   final val maxDepth: IntParam =
-    new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
+    new IntParam(this, "maxDepth", "Maximum depth of the tree. (Nonnegative)" +
      " E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.",
      ParamValidators.gtEq(0))
 
   /**
    * Maximum number of bins used for discretizing continuous features and for choosing how to split
    * on features at each node. More bins give higher granularity.
-   * Must be >= 2 and >= number of categories in any categorical feature.
+   * Must be at least 2 and at least number of categories in any categorical feature.
    * (default = 32)
    * @group param
    */
   final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
-    " discretizing continuous features. Must be >=2 and >= number of categories for any" +
-    " categorical feature.", ParamValidators.gtEq(2))
+    " discretizing continuous features. Must be at least 2 and at least number of categories" +
+    " for any categorical feature.", ParamValidators.gtEq(2))
 
   /**
    * Minimum number of instances each child must have after split.
    * If a split causes the left or right child to have fewer than minInstancesPerNode,
    * the split will be discarded as invalid.
-   * Should be >= 1.
+   * Must be at least 1.
    * (default = 1)
    * @group param
    */
   final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
    " number of instances each child must have after split. If a split causes the left or right" +
    " child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
-    " Should be >= 1.", ParamValidators.gtEq(1))
+    " Must be at least 1.", ParamValidators.gtEq(1))
 
   /**
    * Minimum information gain for a split to be considered at a tree node.
-   * Should be >= 0.0.
+   * Should be at least 0.0.
    * (default = 0.0)
    * @group param
@@ -372,7 +372,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
    * Supported options:
    *  - "auto": Choose automatically for task:
    *            If numTrees == 1, set to "all."
-   *            If numTrees > 1 (forest), set to "sqrt" for classification and
+   *            If numTrees greater than 1 (forest), set to "sqrt" for classification and
    *              to "onethird" for regression.
    *  - "all": use all features
    *  - "onethird": use 1/3 of the features
@@ -424,8 +424,8 @@
 private[ml] trait RandomForestParams extends TreeEnsembleParams {
 
   /**
-   * Number of trees to train (>= 1).
-   * If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
+   * Number of trees to train (at least 1).
+   * If 1, then no bootstrapping is used. If greater than 1, then bootstrapping is done.
    * TODO: Change to always do bootstrapping (simpler). SPARK-7130
    * (default = 20)
    *
@@ -434,7 +434,8 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
    * are a bit different.
    * @group param
    */
-  final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
+  final val numTrees: IntParam =
+    new IntParam(this, "numTrees", "Number of trees to train (at least 1)",
     ParamValidators.gtEq(1))
 
   setDefault(numTrees -> 20)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
index 14ac14d6d61f..8f3d0f8b3214 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
@@ -33,7 +33,7 @@ import org.apache.spark.util.StatCounter
 /**
 * Significance testing methods for [[StreamingTest]]. New 2-sample statistical significance tests
 * should extend [[StreamingTestMethod]] and introduce a new entry in
- * [[StreamingTestMethod.TEST_NAME_TO_OBJECT]]
+ * `StreamingTestMethod.TEST_NAME_TO_OBJECT`
 */
 private[stat] sealed trait StreamingTestMethod extends Serializable {
 
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
index f69717441d61..e1280d024638 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
@@ -41,7 +41,7 @@ private[hive] trait HiveClient {
 
   /**
    * Return the associated Hive SessionState of this [[HiveClientImpl]]
-   * @return [[Any]] not SessionState to avoid linkage error
+   * @return `Any` not SessionState to avoid linkage error
    */
   def getState: Any
 
@@ -76,7 +76,7 @@ private[hive] trait HiveClient {
   /** Return whether a table/view with the specified name exists. */
   def tableExists(dbName: String, tableName: String): Boolean
 
-  /** Returns the specified table, or throws [[NoSuchTableException]]. */
+  /** Returns the specified table, or throws `NoSuchTableException`. */
   final def getTable(dbName: String, tableName: String): CatalogTable = {
     getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
   }
@@ -166,7 +166,7 @@ private[hive] trait HiveClient {
       table: String,
       newParts: Seq[CatalogTablePartition]): Unit
 
-  /** Returns the specified partition, or throws [[NoSuchPartitionException]]. */
+  /** Returns the specified partition, or throws `NoSuchPartitionException`. */
  final def getPartition(
      dbName: String,
      tableName: String,
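
The tree parameters whose documentation is reworded above surface to users through the ML Pipelines setters. A minimal sketch of how the documented defaults and validators behave, assuming a training DataFrame named `train` with `label` and `features` columns (the DataFrame and column names are illustrative assumptions, not part of this patch):

import org.apache.spark.ml.classification.RandomForestClassifier

// Each setter is checked by the ParamValidators referenced in the docs above,
// e.g. ParamValidators.gtEq(0) rejects a negative maxDepth when it is set.
val rf = new RandomForestClassifier()
  .setLabelCol("label")          // assumed label column
  .setFeaturesCol("features")    // assumed feature vector column
  .setMaxDepth(5)                // default = 5, must be nonnegative
  .setMaxBins(32)                // default = 32, at least 2 and at least the number of categories
  .setMinInstancesPerNode(1)     // default = 1, at least 1
  .setNumTrees(20)               // default = 20, at least 1; greater than 1 enables bootstrapping

val model = rf.fit(train)        // `train` is the assumed DataFrame of (label, features) rows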