@@ -69,10 +69,11 @@ setClass("NaiveBayesModel", representation(jobj = "jobj"))
 #' @param aggregationDepth The depth for treeAggregate (greater than or equal to 2). If the dimensions of features
 #'                         or the number of partitions are large, this param could be adjusted to a larger size.
 #'                         This is an expert parameter. Default value should be good for most cases.
-#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in classification model.
-#'                      Supported options: "skip" (filter out rows with invalid data),
-#'                      "error" (throw an error), "keep" (put invalid data in a special additional
-#'                      bucket, at index numLabels). Default is "error".
+#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in features and label
+#'                      column of string type.
+#'                      Supported options: "skip" (filter out rows with invalid data),
+#'                      "error" (throw an error), "keep" (put invalid data in a special additional
+#'                      bucket, at index numLabels). Default is "error".
 #' @param ... additional arguments passed to the method.
 #' @return \code{spark.svmLinear} returns a fitted linear SVM model.
 #' @rdname spark.svmLinear
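
For reference, a minimal sketch of how the new handleInvalid argument would be passed to spark.svmLinear. The Titanic data frame, the formula, and the regParam value are illustrative only (they mirror the existing SparkR examples) and are not part of this diff:

library(SparkR)
sparkR.session()

# Survived is a string label column ("No"/"Yes"); Class, Sex and Age are string features.
training <- createDataFrame(as.data.frame(Titanic))

# "skip" drops rows whose label or string features cannot be indexed,
# instead of failing the fit (the default, "error").
model <- spark.svmLinear(training, Survived ~ Class + Sex + Age,
                         regParam = 0.01, handleInvalid = "skip")
summary(model)
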
@@ -225,10 +226,11 @@ function(object, path, overwrite = FALSE) {
 #' @param upperBoundsOnIntercepts The upper bounds on intercepts if fitting under bound constrained optimization.
 #'                                The bound vector size must be equal to 1 for binomial regression, or the number
 #'                                of classes for multinomial regression.
-#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in classification model.
-#'                      Supported options: "skip" (filter out rows with invalid data),
-#'                      "error" (throw an error), "keep" (put invalid data in a special additional
-#'                      bucket, at index numLabels). Default is "error".
+#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in features and label
+#'                      column of string type.
+#'                      Supported options: "skip" (filter out rows with invalid data),
+#'                      "error" (throw an error), "keep" (put invalid data in a special additional
+#'                      bucket, at index numLabels). Default is "error".
 #' @param ... additional arguments passed to the method.
 #' @return \code{spark.logit} returns a fitted logistic regression model.
 #' @rdname spark.logit
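
The same argument on spark.logit; the dataset, columns, and regParam value here are again just for illustration:

library(SparkR)
sparkR.session()

training <- createDataFrame(as.data.frame(Titanic))

# "keep" routes unseen labels / NULL string values into an extra bucket
# at index numLabels rather than erroring or dropping rows.
model <- spark.logit(training, Survived ~ Class + Sex + Age,
                     regParam = 0.5, handleInvalid = "keep")
summary(model)
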
@@ -410,10 +412,11 @@ setMethod("write.ml", signature(object = "LogisticRegressionModel", path = "char
 #' @param seed seed parameter for weights initialization.
 #' @param initialWeights initialWeights parameter for weights initialization, it should be a
 #'                       numeric vector.
-#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in classification model.
-#'                      Supported options: "skip" (filter out rows with invalid data),
-#'                      "error" (throw an error), "keep" (put invalid data in a special additional
-#'                      bucket, at index numLabels). Default is "error".
+#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in features and label
+#'                      column of string type.
+#'                      Supported options: "skip" (filter out rows with invalid data),
+#'                      "error" (throw an error), "keep" (put invalid data in a special additional
+#'                      bucket, at index numLabels). Default is "error".
 #' @param ... additional arguments passed to the method.
 #' @return \code{spark.mlp} returns a fitted Multilayer Perceptron Classification Model.
 #' @rdname spark.mlp
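
And on spark.mlp. The libsvm sample path and the layer sizes (4 input features, 3 classes) assume Spark's bundled sample data, as in the existing examples; handleInvalid only has an effect when the label or feature columns are of string type, so the default "error" is spelled out here just to show where the argument goes:

library(SparkR)
sparkR.session()

df <- read.df("data/mllib/sample_multiclass_classification_data.txt", source = "libsvm")

# The default "error" makes the fit fail fast on unseen labels or NULL values.
model <- spark.mlp(df, label ~ features, blockSize = 128, layers = c(4, 3),
                   solver = "l-bfgs", maxIter = 100, handleInvalid = "error")
summary(model)
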
@@ -535,10 +538,11 @@ setMethod("write.ml", signature(object = "MultilayerPerceptronClassificationMode
 #' @param formula a symbolic description of the model to be fitted. Currently only a few formula
 #'                operators are supported, including '~', '.', ':', '+', and '-'.
 #' @param smoothing smoothing parameter.
-#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in classification model.
-#'                      Supported options: "skip" (filter out rows with invalid data),
-#'                      "error" (throw an error), "keep" (put invalid data in a special additional
-#'                      bucket, at index numLabels). Default is "error".
+#' @param handleInvalid How to handle invalid data (unseen labels or NULL values) in features and label
+#'                      column of string type.
+#'                      Supported options: "skip" (filter out rows with invalid data),
+#'                      "error" (throw an error), "keep" (put invalid data in a special additional
+#'                      bucket, at index numLabels). Default is "error".
 #' @param ... additional argument(s) passed to the method. Currently only \code{smoothing}.
 #' @return \code{spark.naiveBayes} returns a fitted naive Bayes model.
 #' @rdname spark.naiveBayes
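
Finally, spark.naiveBayes; the UCBAdmissions data and the smoothing value mirror the existing SparkR example and are not part of this diff:

library(SparkR)
sparkR.session()

# Admit is a string label column; Gender and Dept are string features.
df <- createDataFrame(as.data.frame(UCBAdmissions))

# "keep" puts unseen label/feature strings into an extra bucket at
# index numLabels at prediction time instead of erroring.
model <- spark.naiveBayes(df, Admit ~ Gender + Dept, smoothing = 0,
                          handleInvalid = "keep")
summary(model)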