Commit 8c2fcbf

Also try to fix a few doc issues while we're here, that still cause warnings
1 parent d07e957 commit 8c2fcbf
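
The commit touches nine files but applies essentially one family of fixes: doc comments that make scaladoc (or the Javadoc generated from it) emit warnings are reworded. The most common change downgrades a Scaladoc `[[...]]` wiki link to a plain backtick code span when the link target cannot be resolved from the file being documented, for example a class that lives in a module the current one does not depend on. A minimal sketch of the pattern follows; all names in it are invented for illustration, and `ResultHandler` is assumed not to be compiled together with this file:

// Sketch of the [[...]] -> backtick pattern. All names here are invented;
// ResultHandler is assumed NOT to be visible from this compilation unit.

trait LinkBefore {
  // Scaladoc tries to resolve the [[...]] target at doc-generation time and
  // warns ("Could not find any member to link for ...") when it cannot.
  /** Reply a message to the sender; [[ResultHandler.onResult]] will be called. */
  def reply(response: Any): Unit
}

trait LinkAfter {
  // A backtick span is rendered as inline code and is never resolved, so an
  // out-of-scope name cannot break the doc build.
  /** Reply a message to the sender; `ResultHandler.onResult` will be called. */
  def reply(response: Any): Unit
}

The trade-off is losing the hyperlink in the rendered docs in exchange for a clean doc build; the name still renders as inline code, so readers can find the target by name.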

9 files changed: +26 -25 lines changed


core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@ package org.apache.spark.rpc
 private[spark] trait RpcCallContext {
 
   /**
-   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
+   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its `RpcEndpoint.receive`
    * will be called.
    */
   def reply(response: Any): Unit

core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala

Lines changed: 2 additions & 2 deletions

@@ -85,8 +85,8 @@ private[spark] trait UIRoot {
   def getApplicationInfo(appId: String): Option[ApplicationInfo]
 
   /**
-   * Write the event logs for the given app to the [[ZipOutputStream]] instance. If attemptId is
-   * [[None]], event logs for all attempts of this application will be written out.
+   * Write the event logs for the given app to the `ZipOutputStream` instance. If attemptId is
+   * `None`, event logs for all attempts of this application will be written out.
    */
   def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit = {
     Response.serverError()

core/src/main/scala/org/apache/spark/util/SizeEstimator.scala

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ import org.apache.spark.util.collection.OpenHashSet
 /**
  * A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
  * When a class extends it, [[SizeEstimator]] will query the `estimatedSize` first.
- * If `estimatedSize` does not return [[None]], [[SizeEstimator]] will use the returned size
+ * If `estimatedSize` does not return `None`, [[SizeEstimator]] will use the returned size
  * as the size of the object. Otherwise, [[SizeEstimator]] will do the estimation work.
  * The difference between a [[KnownSizeEstimation]] and
  * [[org.apache.spark.util.collection.SizeTracker]] is that, a

mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala

Lines changed: 3 additions & 3 deletions

@@ -371,15 +371,15 @@ private[ann] trait TopologyModel extends Serializable {
   def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]
 
   /**
-   * Prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return prediction
    */
   def predict(features: Vector): Vector
 
   /**
-   * Raw prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Raw prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return raw prediction
@@ -389,7 +389,7 @@ private[ann] trait TopologyModel extends Serializable {
   def predictRaw(features: Vector): Vector
 
   /**
-   * Probability of the model. See {@link ProbabilisticClassificationModel}
+   * Probability of the model. See `ProbabilisticClassificationModel`
    *
    * @param rawPrediction raw prediction vector
    * @return probability
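
The hunks above also remove `{@link ...}`, which is Javadoc rather than Scaladoc syntax: scaladoc does not interpret it, so it appears verbatim in the rendered output and can trip warnings when the docs are also fed through javadoc. A hedged two-method sketch of the rewrite; `LinkStyles`, `SomeModel`, and both method names are invented:

// Hypothetical sketch; LinkStyles, SomeModel and the method names are invented.
import org.apache.spark.ml.linalg.Vector

trait LinkStyles {
  /** Prediction of the model. See {@link SomeModel} */
  def predictBefore(features: Vector): Vector // "{@link ...}" is Javadoc syntax;
                                              // scaladoc shows it as literal text

  /** Prediction of the model. See `SomeModel` */
  def predictAfter(features: Vector): Vector  // Scaladoc code span: rendered as
                                              // inline code instead
}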

mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala

Lines changed: 1 addition & 1 deletion

@@ -121,7 +121,7 @@ sealed abstract class Attribute extends Serializable {
 private[attribute] trait AttributeFactory {
 
   /**
-   * Creates an [[Attribute]] from a [[Metadata]] instance.
+   * Creates an [[Attribute]] from a `Metadata` instance.
    */
   private[attribute] def fromMetadata(metadata: Metadata): Attribute

mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ object Correlation {
    *               Supported: `pearson` (default), `spearman`
    * @return A dataframe that contains the correlation matrix of the column of vectors. This
    *         dataframe contains a single row and a single column of name
-   *         '$METHODNAME($COLUMN)'.
+   *         `$METHODNAME($COLUMN)`.
    * @throws IllegalArgumentException if the column is not a valid column in the dataset, or if
    *                                  the content of this column is not of type Vector.
    *

mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala

Lines changed: 13 additions & 12 deletions

@@ -40,39 +40,39 @@ private[ml] trait DecisionTreeParams extends PredictorParams
   with HasCheckpointInterval with HasSeed with HasWeightCol {
 
   /**
-   * Maximum depth of the tree (>= 0).
+   * Maximum depth of the tree (nonnegative).
    * E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
    * (default = 5)
    * @group param
    */
   final val maxDepth: IntParam =
-    new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
+    new IntParam(this, "maxDepth", "Maximum depth of the tree. (Nonnegative)" +
       " E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.",
       ParamValidators.gtEq(0))
 
   /**
    * Maximum number of bins used for discretizing continuous features and for choosing how to split
    * on features at each node. More bins give higher granularity.
-   * Must be >= 2 and >= number of categories in any categorical feature.
+   * Must be at least 2 and at least number of categories in any categorical feature.
    * (default = 32)
    * @group param
    */
   final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
-    " discretizing continuous features. Must be >=2 and >= number of categories for any" +
-    " categorical feature.", ParamValidators.gtEq(2))
+    " discretizing continuous features. Must be at least 2 and at least number of categories" +
+    " for any categorical feature.", ParamValidators.gtEq(2))
 
   /**
    * Minimum number of instances each child must have after split.
    * If a split causes the left or right child to have fewer than minInstancesPerNode,
    * the split will be discarded as invalid.
-   * Should be >= 1.
+   * Must be at least 1.
    * (default = 1)
    * @group param
    */
   final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
     " number of instances each child must have after split. If a split causes the left or right" +
     " child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
-    " Should be >= 1.", ParamValidators.gtEq(1))
+    " Must be at least 1.", ParamValidators.gtEq(1))
 
   /**
    * Minimum fraction of the weighted sample count that each child must have after split.
@@ -91,7 +91,7 @@ private[ml] trait DecisionTreeParams extends PredictorParams
 
   /**
    * Minimum information gain for a split to be considered at a tree node.
-   * Should be >= 0.0.
+   * Should be at least 0.0.
    * (default = 0.0)
    * @group param
    */
@@ -316,7 +316,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
    * Supported options:
    *  - "auto": Choose automatically for task:
    *            If numTrees == 1, set to "all."
-   *            If numTrees > 1 (forest), set to "sqrt" for classification and
+   *            If numTrees greater than 1 (forest), set to "sqrt" for classification and
    *            to "onethird" for regression.
    *  - "all": use all features
    *  - "onethird": use 1/3 of the features
@@ -361,8 +361,8 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
 private[ml] trait RandomForestParams extends TreeEnsembleParams {
 
   /**
-   * Number of trees to train (>= 1).
-   * If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
+   * Number of trees to train (at least 1).
+   * If 1, then no bootstrapping is used. If greater than 1, then bootstrapping is done.
    * TODO: Change to always do bootstrapping (simpler). SPARK-7130
    * (default = 20)
    *
@@ -371,7 +371,8 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
    * are a bit different.
    * @group param
    */
-  final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
+  final val numTrees: IntParam =
+    new IntParam(this, "numTrees", "Number of trees to train (at least 1)",
     ParamValidators.gtEq(1))
 
   setDefault(numTrees -> 20)
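
The `>=` and `>` rewordings in this file are presumably about the generated Java API docs: Spark runs its Scaladoc through genjavadoc to produce Javadoc, and under Java 8 doclint a bare `>` in a comment can be treated as malformed HTML, failing the doc build. A minimal sketch under that assumption; `DepthParam` and its methods are invented:

// Sketch of the angle-bracket problem; DepthParam and its methods are invented.
// When Scaladoc is converted to Javadoc, Java 8 doclint can reject a bare '>'
// in a doc comment with "error: bad use of '>'".

trait DepthParam {
  /** Maximum depth of the tree (>= 0). */        // may fail the javadoc build
  def setDepthBefore(value: Int): this.type

  /** Maximum depth of the tree (nonnegative). */ // plain words are safe in
  def setDepthAfter(value: Int): this.type        // both scaladoc and javadoc
}

Rewording to plain English avoids the issue without sprinkling HTML entities like &gt; through the Scala sources.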

mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ import org.apache.spark.util.StatCounter
 /**
  * Significance testing methods for [[StreamingTest]]. New 2-sample statistical significance tests
  * should extend [[StreamingTestMethod]] and introduce a new entry in
- * [[StreamingTestMethod.TEST_NAME_TO_OBJECT]]
+ * `StreamingTestMethod.TEST_NAME_TO_OBJECT`
  */
 private[stat] sealed trait StreamingTestMethod extends Serializable {
sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala

Lines changed: 3 additions & 3 deletions

@@ -41,7 +41,7 @@ private[hive] trait HiveClient {
 
   /**
    * Return the associated Hive SessionState of this [[HiveClientImpl]]
-   * @return [[Any]] not SessionState to avoid linkage error
+   * @return `Any` not SessionState to avoid linkage error
    */
   def getState: Any
 
@@ -76,7 +76,7 @@ private[hive] trait HiveClient {
   /** Return whether a table/view with the specified name exists. */
   def tableExists(dbName: String, tableName: String): Boolean
 
-  /** Returns the specified table, or throws [[NoSuchTableException]]. */
+  /** Returns the specified table, or throws `NoSuchTableException`. */
   final def getTable(dbName: String, tableName: String): CatalogTable = {
     getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
   }
@@ -166,7 +166,7 @@ private[hive] trait HiveClient {
       table: String,
       newParts: Seq[CatalogTablePartition]): Unit
 
-  /** Returns the specified partition, or throws [[NoSuchPartitionException]]. */
+  /** Returns the specified partition, or throws `NoSuchPartitionException`. */
   final def getPartition(
       dbName: String,
       tableName: String,
