diff --git a/NAMESPACE b/NAMESPACE
index 319459eb1..e977f8311 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -92,6 +92,7 @@ S3method(type_sum,model_fit)
S3method(type_sum,model_spec)
S3method(update,C5_rules)
S3method(update,bag_mars)
+S3method(update,bag_mlp)
S3method(update,bag_tree)
S3method(update,bart)
S3method(update,boost_tree)
@@ -146,6 +147,7 @@ export(augment)
export(auto_ml)
export(autoplot)
export(bag_mars)
+export(bag_mlp)
export(bag_tree)
export(bart)
export(bartMachine_interval_calc)
diff --git a/R/bag_mlp.R b/R/bag_mlp.R
new file mode 100644
index 000000000..baa3f4f7e
--- /dev/null
+++ b/R/bag_mlp.R
@@ -0,0 +1,77 @@
+#' Ensembles of neural networks
+#'
+#' @description
+#'
+#' `bag_mlp()` defines an ensemble of single-layer, feed-forward neural networks.
+#' This function can fit classification and regression models.
+#'
+#' \Sexpr[stage=render,results=rd]{parsnip:::make_engine_list("bag_mlp")}
+#'
+#' More information on how \pkg{parsnip} is used for modeling is at
+#' \url{https://www.tidymodels.org/}.
+#'
+#' @inheritParams mlp
+#'
+#' @template spec-details
+#'
+#' @template spec-references
+#'
+#' @seealso \Sexpr[stage=render,results=rd]{parsnip:::make_seealso_list("bag_mlp")}
+#' @export
+bag_mlp <-
+ function(mode = "unknown",
+ hidden_units = NULL,
+ penalty = NULL,
+ epochs = NULL,
+ engine = "nnet") {
+ args <- list(
+ hidden_units = enquo(hidden_units),
+ penalty = enquo(penalty),
+ epochs = enquo(epochs)
+ )
+
+ new_model_spec(
+ "bag_mlp",
+ args = args,
+ eng_args = NULL,
+ mode = mode,
+ user_specified_mode = !missing(mode),
+ method = NULL,
+ engine = engine,
+ user_specified_engine = !missing(engine)
+ )
+ }
+
+# ------------------------------------------------------------------------------
+
+#' @method update bag_mlp
+#' @rdname parsnip_update
+#' @inheritParams mars
+#' @export
+update.bag_mlp <-
+ function(object,
+ parameters = NULL,
+ hidden_units = NULL, penalty = NULL, epochs = NULL,
+ fresh = FALSE, ...) {
+
+ args <- list(
+ hidden_units = enquo(hidden_units),
+ penalty = enquo(penalty),
+ epochs = enquo(epochs)
+ )
+
+ update_spec(
+ object = object,
+ parameters = parameters,
+ args_enquo_list = args,
+ fresh = fresh,
+ cls = "bag_mlp",
+ ...
+ )
+ }
+
+# ------------------------------------------------------------------------------
+
+set_new_model("bag_mlp")
+set_model_mode("bag_mlp", "classification")
+set_model_mode("bag_mlp", "regression")
diff --git a/R/bag_mlp_nnet.R b/R/bag_mlp_nnet.R
new file mode 100644
index 000000000..f0f0c70f0
--- /dev/null
+++ b/R/bag_mlp_nnet.R
@@ -0,0 +1,12 @@
+#' Bagged neural networks via nnet
+#'
+#' [baguette::bagger()] creates a collection of neural networks forming an
+#' ensemble. All models in the ensemble are combined to produce a final prediction.
+#'
+#' @includeRmd man/rmd/bag_mlp_nnet.md details
+#'
+#' @name details_bag_mlp_nnet
+#' @keywords internal
+NULL
+
+# See inst/README-DOCS.md for a description of how these files are processed
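The model fit template in the generated Rd below shows the call this wrapper ultimately constructs; roughly, as a sketch with illustrative values:

    library(baguette)

    # size/decay are nnet's names for hidden_units/penalty and are
    # passed through bagger() to the base model (sketch)
    set.seed(1)
    bagger(mpg ~ ., data = mtcars, base_model = "nnet",
           size = 5, decay = 0.01)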
diff --git a/R/print.R b/R/print.R
index 6c3695dad..1f601b142 100644
--- a/R/print.R
+++ b/R/print.R
@@ -38,6 +38,7 @@ model_descs <- tibble::tribble(
~cls, ~desc,
"auto_ml", "Automatic Machine Learning",
"bag_mars", "Bagged MARS",
+ "bag_mlp", "Bagged Neural Network",
"bag_tree", "Bagged Decision Tree",
"bart", "BART",
"boost_tree", "Boosted Tree",
diff --git a/_pkgdown.yml b/_pkgdown.yml
index f15ab3825..12fe60196 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -35,6 +35,7 @@ reference:
contents:
- auto_ml
- bag_mars
+ - bag_mlp
- bag_tree
- bart
- boost_tree
diff --git a/inst/models.tsv b/inst/models.tsv
index e823ac58c..76cd99185 100644
--- a/inst/models.tsv
+++ b/inst/models.tsv
@@ -3,6 +3,8 @@
"auto_ml" "regression" "h2o" "agua"
"bag_mars" "classification" "earth" "baguette"
"bag_mars" "regression" "earth" "baguette"
+"bag_mlp" "classification" "nnet" "baguette"
+"bag_mlp" "regression" "nnet" "baguette"
"bag_tree" "censored regression" "rpart" "censored"
"bag_tree" "classification" "C5.0" "baguette"
"bag_tree" "classification" "rpart" "baguette"
@@ -12,10 +14,12 @@
"boost_tree" "censored regression" "mboost" "censored"
"boost_tree" "classification" "C5.0" NA
"boost_tree" "classification" "h2o" "agua"
+"boost_tree" "classification" "h2o_gbm" "agua"
"boost_tree" "classification" "lightgbm" "bonsai"
"boost_tree" "classification" "spark" NA
"boost_tree" "classification" "xgboost" NA
"boost_tree" "regression" "h2o" "agua"
+"boost_tree" "regression" "h2o_gbm" "agua"
"boost_tree" "regression" "lightgbm" "bonsai"
"boost_tree" "regression" "spark" NA
"boost_tree" "regression" "xgboost" NA
@@ -43,6 +47,7 @@
"linear_reg" "regression" "brulee" NA
"linear_reg" "regression" "gee" "multilevelmod"
"linear_reg" "regression" "glm" NA
+"linear_reg" "regression" "glmer" "multilevelmod"
"linear_reg" "regression" "glmnet" NA
"linear_reg" "regression" "gls" "multilevelmod"
"linear_reg" "regression" "h2o" "agua"
diff --git a/man/bag_mlp.Rd b/man/bag_mlp.Rd
new file mode 100644
index 000000000..f5c5bd8b5
--- /dev/null
+++ b/man/bag_mlp.Rd
@@ -0,0 +1,53 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/bag_mlp.R
+\name{bag_mlp}
+\alias{bag_mlp}
+\title{Ensembles of neural networks}
+\usage{
+bag_mlp(
+ mode = "unknown",
+ hidden_units = NULL,
+ penalty = NULL,
+ epochs = NULL,
+ engine = "nnet"
+)
+}
+\arguments{
+\item{mode}{A single character string for the prediction outcome mode.
+Possible values for this model are "unknown", "regression", or
+"classification".}
+
+\item{hidden_units}{An integer for the number of units in the hidden layer.}
+
+\item{penalty}{A non-negative numeric value for the amount of weight
+decay.}
+
+\item{epochs}{An integer for the number of training iterations.}
+
+\item{engine}{A single character string specifying what computational engine
+to use for fitting.}
+}
+\description{
+\code{bag_mlp()} defines an ensemble of single-layer, feed-forward neural networks.
+This function can fit classification and regression models.
+
+\Sexpr[stage=render,results=rd]{parsnip:::make_engine_list("bag_mlp")}
+
+More information on how \pkg{parsnip} is used for modeling is at
+\url{https://www.tidymodels.org/}.
+}
+\details{
+This function only defines what \emph{type} of model is being fit. Once an engine
+is specified, the \emph{method} to fit the model is also defined. See
+\code{\link[=set_engine]{set_engine()}} for more on setting the engine, including how to set engine
+arguments.
+
+The model is not trained or fit until the \code{\link[=fit.model_spec]{fit()}} function is used
+with the data.
+}
+\references{
+\url{https://www.tidymodels.org}, \href{https://www.tmwr.org/}{\emph{Tidy Modeling with R}}, \href{https://www.tidymodels.org/find/parsnip/}{searchable table of parsnip models}
+}
+\seealso{
+\Sexpr[stage=render,results=rd]{parsnip:::make_seealso_list("bag_mlp")}
+}
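All three main arguments are tunable, so the spec composes with the usual placeholder workflow; a sketch (assumes the tune package for `tune()`):

    library(parsnip)
    library(tune)

    # Mark the main arguments for later tuning instead of fixing them
    bag_mlp(hidden_units = tune(), penalty = tune(), epochs = tune()) %>%
      set_engine("nnet") %>%
      set_mode("classification")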
diff --git a/man/details_C5_rules_C5.0.Rd b/man/details_C5_rules_C5.0.Rd
index d8a05a9de..08801f45d 100644
--- a/man/details_C5_rules_C5.0.Rd
+++ b/man/details_C5_rules_C5.0.Rd
@@ -76,11 +76,10 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\item Quinlan R (1992). “Learning with Continuous Classes.” Proceedings of
the 5th Australian Joint Conference On Artificial Intelligence,
pp. 343-348.
-\item Quinlan R (1993).”Combining Instance-Based and Model-Based
-Learning.” Proceedings of the Tenth International Conference on
-Machine Learning, pp. 236-243.
-\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}.
-Springer.
+\item Quinlan R (1993). “Combining Instance-Based and Model-Based Learning.”
+Proceedings of the Tenth International Conference on Machine Learning,
+pp. 236-243.
+\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_auto_ml_h2o.Rd b/man/details_auto_ml_h2o.Rd
index b567c3ef4..a131ddad2 100644
--- a/man/details_auto_ml_h2o.Rd
+++ b/man/details_auto_ml_h2o.Rd
@@ -15,11 +15,11 @@ This model has no tuning parameters.
Engine arguments of interest
\itemize{
-\item \code{max_runtime_secs} and \code{max_models}: controls the maximum running
-time and number of models to build in the automatic process.
-\item \code{exclude_algos} and \code{include_algos}: a character vector indicating
-the excluded or included algorithms during model building. To see a
-full list of supported models, see the details section in
+\item \code{max_runtime_secs} and \code{max_models}: control the maximum running time
+and number of models to build in the automatic process.
+\item \code{exclude_algos} and \code{include_algos}: a character vector indicating the
+excluded or included algorithms during model building. To see a full
+list of supported models, see the details section in
\code{\link[h2o:h2o.automl]{h2o::h2o.automl()}}.
\item \code{validation}: A number between 0 and 1 specifying the \emph{proportion}
of training data reserved as validation set. This is used by h2o for
diff --git a/man/details_bag_mars_earth.Rd b/man/details_bag_mars_earth.Rd
index aea7cd508..0b6759aaf 100644
--- a/man/details_bag_mars_earth.Rd
+++ b/man/details_bag_mars_earth.Rd
@@ -14,8 +14,7 @@ For this engine, there are multiple modes: classification and regression
This model has 3 tuning parameters:
\itemize{
\item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L)
-\item \code{prune_method}: Pruning Method (type: character, default:
-‘backward’)
+\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’)
\item \code{num_terms}: # Model Terms (type: integer, default: see below)
}
@@ -108,8 +107,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\item Friedman, J. 1991. “Multivariate Adaptive Regression Splines.” \emph{The
Annals of Statistics}, vol. 19, no. 1, pp. 1-67.
\item Milborrow, S. \href{http://www.milbo.org/doc/earth-notes.pdf}{“Notes on the earth package.”}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_bag_mlp_nnet.Rd b/man/details_bag_mlp_nnet.Rd
new file mode 100644
index 000000000..0f46443a9
--- /dev/null
+++ b/man/details_bag_mlp_nnet.Rd
@@ -0,0 +1,104 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/bag_mlp_nnet.R
+\name{details_bag_mlp_nnet}
+\alias{details_bag_mlp_nnet}
+\title{Bagged neural networks via nnet}
+\description{
+\code{\link[baguette:bagger]{baguette::bagger()}} creates a collection of neural networks forming an
+ensemble. All models in the ensemble are combined to produce a final prediction.
+}
+\details{
+For this engine, there are multiple modes: classification and regression
+\subsection{Tuning Parameters}{
+
+This model has 3 tuning parameters:
+\itemize{
+\item \code{hidden_units}: # Hidden Units (type: integer, default: 10L)
+\item \code{penalty}: Amount of Regularization (type: double, default: 0.0)
+\item \code{epochs}: # Epochs (type: integer, default: 1000L)
+}
+
+These defaults are set by the \code{baguette} package and are different from
+those in \code{\link[nnet:nnet]{nnet::nnet()}}.
+}
+
+\subsection{Translation from parsnip to the original package (classification)}{
+
+The \strong{baguette} extension package is required to fit this model.
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) \%>\%
+ set_engine("nnet") \%>\%
+ set_mode("classification") \%>\%
+ translate()
+}\if{html}{\out{</div>}}
+
+\if{html}{\out{<div class="sourceCode">}}\preformatted{## Bagged Neural Network Model Specification (classification)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+##
+## Computational engine: nnet
+##
+## Model fit template:
+## baguette::bagger(formula = missing_arg(), data = missing_arg(),
+## weights = missing_arg(), size = integer(1), decay = double(1),
+## base_model = "nnet")
+}\if{html}{\out{</div>}}
+}
+
+\subsection{Translation from parsnip to the original package (regression)}{
+
+The \strong{baguette} extension package is required to fit this model.
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) \%>\%
+ set_engine("nnet") \%>\%
+ set_mode("regression") \%>\%
+ translate()
+}\if{html}{\out{</div>}}
+
+\if{html}{\out{<div class="sourceCode">}}\preformatted{## Bagged Neural Network Model Specification (regression)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+##
+## Computational engine: nnet
+##
+## Model fit template:
+## baguette::bagger(formula = missing_arg(), data = missing_arg(),
+## weights = missing_arg(), size = integer(1), decay = double(1),
+## base_model = "nnet")
+}\if{html}{\out{</div>}}
+}
+
+\subsection{Preprocessing requirements}{
+
+Factor/categorical predictors need to be converted to numeric values
+(e.g., dummy or indicator variables) for this engine. When using the
+formula method via \code{\link[=fit.model_spec]{fit()}}, parsnip will
+convert factor columns to indicators.
+
+Predictors should have the same scale. One way to achieve this is to
+center and scale each predictor so that it has mean zero and a
+variance of one.
+}
+
+\subsection{Case weights}{
+
+The underlying model implementation does not allow for case weights.
+}
+
+\subsection{References}{
+\itemize{
+\item Breiman L. 1996. “Bagging predictors”. Machine Learning. 24 (2):
+123-140
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
+}
+}
+}
+\keyword{internal}
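Putting the preprocessing requirements above into practice, a hedged end-to-end sketch (assumes the recipes and workflows packages; the data set is illustrative):

    library(parsnip)
    library(baguette)
    library(recipes)
    library(workflows)

    # Dummy-encode any factors and put predictors on a common scale,
    # per the engine notes above (iris is illustrative)
    rec <- recipe(Species ~ ., data = iris) %>%
      step_dummy(all_nominal_predictors()) %>%
      step_normalize(all_numeric_predictors())

    wf <- workflow() %>%
      add_recipe(rec) %>%
      add_model(bag_mlp() %>% set_mode("classification"))

    set.seed(1)
    fit(wf, data = iris)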
diff --git a/man/details_bag_tree_C5.0.Rd b/man/details_bag_tree_C5.0.Rd
index c6370328b..53509ac36 100644
--- a/man/details_bag_tree_C5.0.Rd
+++ b/man/details_bag_tree_C5.0.Rd
@@ -65,8 +65,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\itemize{
\item Breiman, L. 1996. “Bagging predictors”. Machine Learning. 24 (2):
123-140
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_bag_tree_rpart.Rd b/man/details_bag_tree_rpart.Rd
index bb1f3c672..e84e45cd9 100644
--- a/man/details_bag_tree_rpart.Rd
+++ b/man/details_bag_tree_rpart.Rd
@@ -143,8 +143,7 @@ time.
123-140
\item Hothorn T, Lausen B, Benner A, Radespiel-Troeger M. 2004. Bagging
Survival Trees. \emph{Statistics in Medicine}, 23(1), 77–91.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_bart_dbarts.Rd b/man/details_bart_dbarts.Rd
index 942c8971c..5e250becc 100644
--- a/man/details_bart_dbarts.Rd
+++ b/man/details_bart_dbarts.Rd
@@ -18,8 +18,8 @@ This model has 4 tuning parameters:
double, default: 0.95)
\item \code{prior_terminal_node_expo}: Terminal Node Prior Exponent (type:
double, default: 2.00)
-\item \code{prior_outcome_range}: Prior for Outcome Range (type: double,
-default: 2.00)
+\item \code{prior_outcome_range}: Prior for Outcome Range (type: double, default:
+2.00)
}
}
@@ -33,16 +33,15 @@ to the user. Useful for “thinning” samples.
formulation.
\item \code{ndpost}, \code{n.samples}: The number of posterior draws after burn in,
\code{ndpost} / \code{keepevery} will actually be returned.
-\item \code{nskip}, \code{n.burn}: Number of MCMC iterations to be treated as burn
-in.
+\item \code{nskip}, \code{n.burn}: Number of MCMC iterations to be treated as burn-in.
\item \code{nchain}, \code{n.chains}: Integer specifying how many independent tree
sets and fits should be calculated.
\item \code{nthread}, \code{n.threads}: Integer specifying how many threads to use.
Depending on the CPU architecture, using more than the number of
chains can degrade performance for small/medium data sets. As such
some calculations may be executed single threaded regardless.
-\item \code{combinechains}, \code{combineChains}: Logical; if \code{TRUE}, samples will
-be returned in arrays of dimensions equal to \code{nchain} times \code{ndpost}
+\item \code{combinechains}, \code{combineChains}: Logical; if \code{TRUE}, samples will be
+returned in arrays of dimensions equal to \code{nchain} times \code{ndpost}
times number of observations.
}
}
diff --git a/man/details_boost_tree_C5.0.Rd b/man/details_boost_tree_C5.0.Rd
index 5e8cd42b5..54928213d 100644
--- a/man/details_boost_tree_C5.0.Rd
+++ b/man/details_boost_tree_C5.0.Rd
@@ -16,8 +16,8 @@ This model has 3 tuning parameters:
\itemize{
\item \code{trees}: # Trees (type: integer, default: 15L)
\item \code{min_n}: Minimal Node Size (type: integer, default: 2L)
-\item \code{sample_size}: Proportion Observations Sampled (type: double,
-default: 1.0)
+\item \code{sample_size}: Proportion Observations Sampled (type: double, default:
+1.0)
}
The implementation of C5.0 limits the number of trees to be between 1
@@ -88,8 +88,7 @@ for \code{boost_tree()} with the \code{"C5.0"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_boost_tree_h2o.Rd b/man/details_boost_tree_h2o.Rd
index 25da9bbed..0d61e694a 100644
--- a/man/details_boost_tree_h2o.Rd
+++ b/man/details_boost_tree_h2o.Rd
@@ -21,8 +21,7 @@ This model has 8 tuning parameters:
\item \code{sample_size}: # Observations Sampled (type: integer, default: 1)
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: 1)
\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0)
-\item \code{stop_iter}: # Iterations Before Stopping (type: integer, default:
-0)
+\item \code{stop_iter}: # Iterations Before Stopping (type: integer, default: 0)
}
\code{min_n} represents the fewest allowed observations in a terminal node,
@@ -136,8 +135,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictor
that will be randomly sampled at each split rather than the \emph{count}. In
some settings, such as when tuning over preprocessors that influence the
number of predictors, this parameterization is quite
-helpful—interpreting \code{mtry} as a proportion means that [0,1] is always
-a valid range for that parameter, regardless of input data.
+helpful—interpreting \code{mtry} as a proportion means that [0,1] is
+always a valid range for that parameter, regardless of input data.
parsnip and its extensions accommodate this parameterization using the
\code{counts} argument: a logical indicating whether \code{mtry} should be
@@ -154,7 +153,7 @@ to \code{TRUE}. For engines that support the proportion interpretation
(currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and
\code{"lightgbm"} via the bonsai package) the user can pass the
\code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values
-within [0,1].
+within [0,1].
}
\subsection{Initializing h2o}{
diff --git a/man/details_boost_tree_lightgbm.Rd b/man/details_boost_tree_lightgbm.Rd
index f60a6ab74..4782bb8ab 100644
--- a/man/details_boost_tree_lightgbm.Rd
+++ b/man/details_boost_tree_lightgbm.Rd
@@ -137,8 +137,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictor
that will be randomly sampled at each split rather than the \emph{count}. In
some settings, such as when tuning over preprocessors that influence the
number of predictors, this parameterization is quite
-helpful—interpreting \code{mtry} as a proportion means that [0,1] is always
-a valid range for that parameter, regardless of input data.
+helpful—interpreting \code{mtry} as a proportion means that [0,1] is
+always a valid range for that parameter, regardless of input data.
parsnip and its extensions accommodate this parameterization using the
\code{counts} argument: a logical indicating whether \code{mtry} should be
@@ -155,7 +155,7 @@ to \code{TRUE}. For engines that support the proportion interpretation
(currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and
\code{"lightgbm"} via the bonsai package) the user can pass the
\code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values
-within [0,1].
+within [0,1].
}
\subsection{Bagging}{
@@ -173,7 +173,7 @@ that the booster will perform bagging at every \code{k}th boosting iteration.
Thus, by default, the \code{sample_size} argument would be ignored without
setting this argument manually. Other boosting libraries, like xgboost,
do not have an analogous argument to \code{bagging_freq} and use \code{k = 1} when
-the analogue to \code{bagging_fraction} is in (0,1). \emph{bonsai will thus
+the analogue to \code{bagging_fraction} is in (0,1). \emph{bonsai will thus
automatically set} \code{bagging_freq = 1} \emph{in} \code{set_engine("lightgbm", ...)}
if \code{sample_size} (i.e. \code{bagging_fraction}) is not equal to 1 and no
\code{bagging_freq} value is supplied. This default can be overridden by
@@ -200,8 +200,7 @@ The “Introduction to bonsai” article contains
\subsection{References}{
\itemize{
\item \href{https://papers.nips.cc/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html}{LightGBM: A Highly Efficient Gradient Boosting Decision Tree}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
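The `counts = FALSE` passage above in code form; a sketch (assumes the bonsai package and a lightgbm installation):

    library(parsnip)
    library(bonsai)

    # mtry read as a proportion of predictors, so 0.5 stays valid
    # no matter how many columns the preprocessor produces
    boost_tree(mtry = 0.5, trees = 100) %>%
      set_engine("lightgbm", counts = FALSE) %>%
      set_mode("regression")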
diff --git a/man/details_boost_tree_mboost.Rd b/man/details_boost_tree_mboost.Rd
index 738f7c1da..cd1975d23 100644
--- a/man/details_boost_tree_mboost.Rd
+++ b/man/details_boost_tree_mboost.Rd
@@ -69,8 +69,7 @@ Predictions of type \code{"time"} are predictions of the mean survival time.
\itemize{
\item Buehlmann P, Hothorn T. 2007. Boosting algorithms: regularization,
prediction and model fitting. \emph{Statistical Science}, 22(4), 477–505.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_boost_tree_spark.Rd b/man/details_boost_tree_spark.Rd
index 7d105c259..9f8e29482 100644
--- a/man/details_boost_tree_spark.Rd
+++ b/man/details_boost_tree_spark.Rd
@@ -21,8 +21,7 @@ This model has 7 tuning parameters:
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see
below)
\item \code{min_n}: Minimal Node Size (type: integer, default: 1L)
-\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default:
-0.0)
+\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0)
\item \code{sample_size}: # Observations Sampled (type: integer, default: 1.0)
}
@@ -126,15 +125,14 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
@@ -142,8 +140,7 @@ object.
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
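The serialization caveat above, spelled out; a sketch (`fitted_model`, the path, and the Spark connection `sc` are hypothetical placeholders):

    library(sparklyr)

    # Save only the Spark-side fit; the parsnip wrapper itself can be
    # saved with saveRDS() as usual
    ml_save(fitted_model$fit, "path/to/spark_model")

    # In a new session: reload and reattach to the parsnip object
    fitted_model$fit <- ml_load(sc, "path/to/spark_model")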
diff --git a/man/details_boost_tree_xgboost.Rd b/man/details_boost_tree_xgboost.Rd
index 8ea17c06c..23136b233 100644
--- a/man/details_boost_tree_xgboost.Rd
+++ b/man/details_boost_tree_xgboost.Rd
@@ -20,10 +20,9 @@ This model has 8 tuning parameters:
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see
below)
\item \code{min_n}: Minimal Node Size (type: integer, default: 1L)
-\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default:
-0.0)
-\item \code{sample_size}: Proportion Observations Sampled (type: double,
-default: 1.0)
+\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0)
+\item \code{sample_size}: Proportion Observations Sampled (type: double, default:
+1.0)
\item \code{stop_iter}: # Iterations Before Stopping (type: integer, default:
Inf)
}
@@ -191,8 +190,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictor
that will be randomly sampled at each split rather than the \emph{count}. In
some settings, such as when tuning over preprocessors that influence the
number of predictors, this parameterization is quite
-helpful—interpreting \code{mtry} as a proportion means that [0,1] is always
-a valid range for that parameter, regardless of input data.
+helpful—interpreting \code{mtry} as a proportion means that [0,1] is
+always a valid range for that parameter, regardless of input data.
parsnip and its extensions accommodate this parameterization using the
\code{counts} argument: a logical indicating whether \code{mtry} should be
@@ -209,7 +208,7 @@ to \code{TRUE}. For engines that support the proportion interpretation
(currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and
\code{"lightgbm"} via the bonsai package) the user can pass the
\code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values
-within [0,1].
+within [0,1].
}
\subsection{Early stopping}{
@@ -252,8 +251,7 @@ for \code{boost_tree()} with the \code{"xgboost"} engine.
\subsection{References}{
\itemize{
\item \href{https://arxiv.org/abs/1603.02754}{XGBoost: A Scalable Tree Boosting System}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_cubist_rules_Cubist.Rd b/man/details_cubist_rules_Cubist.Rd
index 8e6c6f39f..f7a73a7f1 100644
--- a/man/details_cubist_rules_Cubist.Rd
+++ b/man/details_cubist_rules_Cubist.Rd
@@ -64,11 +64,10 @@ are not required for this model.
\item Quinlan R (1992). “Learning with Continuous Classes.” Proceedings of
the 5th Australian Joint Conference On Artificial Intelligence,
pp. 343-348.
-\item Quinlan R (1993).”Combining Instance-Based and Model-Based
-Learning.” Proceedings of the Tenth International Conference on
-Machine Learning, pp. 236-243.
-\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}.
-Springer.
+\item Quinlan R (1993). “Combining Instance-Based and Model-Based Learning.”
+Proceedings of the Tenth International Conference on Machine Learning,
+pp. 236-243.
+\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_decision_tree_C5.0.Rd b/man/details_decision_tree_C5.0.Rd
index d52eeb99b..19c2f1114 100644
--- a/man/details_decision_tree_C5.0.Rd
+++ b/man/details_decision_tree_C5.0.Rd
@@ -68,8 +68,7 @@ for \code{decision_tree()} with the \code{"C5.0"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_decision_tree_partykit.Rd b/man/details_decision_tree_partykit.Rd
index a7746f10b..87afcbcaa 100644
--- a/man/details_decision_tree_partykit.Rd
+++ b/man/details_decision_tree_partykit.Rd
@@ -134,8 +134,7 @@ time.
\subsection{References}{
\itemize{
\item \href{https://jmlr.org/papers/v16/hothorn15a.html}{partykit: A Modular Toolkit for Recursive Partytioning in R}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_decision_tree_rpart.Rd b/man/details_decision_tree_rpart.Rd
index c1e1f7952..3536b1eef 100644
--- a/man/details_decision_tree_rpart.Rd
+++ b/man/details_decision_tree_rpart.Rd
@@ -133,8 +133,7 @@ for \code{decision_tree()} with the \code{"rpart"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_decision_tree_spark.Rd b/man/details_decision_tree_spark.Rd
index c1ab4a8ee..8e3b362a1 100644
--- a/man/details_decision_tree_spark.Rd
+++ b/man/details_decision_tree_spark.Rd
@@ -92,22 +92,20 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_discrim_flexible_earth.Rd b/man/details_discrim_flexible_earth.Rd
index a2bfca061..465ff19ce 100644
--- a/man/details_discrim_flexible_earth.Rd
+++ b/man/details_discrim_flexible_earth.Rd
@@ -17,8 +17,7 @@ This model has 3 tuning parameter:
\itemize{
\item \code{num_terms}: # Model Terms (type: integer, default: (see below))
\item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L)
-\item \code{prune_method}: Pruning Method (type: character, default:
-‘backward’)
+\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’)
}
The default value of \code{num_terms} depends on the number of columns (\code{p}):
@@ -79,8 +78,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\item Hastie, Tibshirani & Buja (1994) Flexible Discriminant Analysis by
Optimal Scoring, \emph{Journal of the American Statistical Association},
89:428, 1255-1270
-\item Friedman (1991). Multivariate Adaptive Regression Splines. \emph{The
-Annals of Statistics}, 19(1), 1-67.
+\item Friedman (1991). Multivariate Adaptive Regression Splines. \emph{The Annals
+of Statistics}, 19(1), 1-67.
}
}
}
diff --git a/man/details_discrim_linear_MASS.Rd b/man/details_discrim_linear_MASS.Rd
index 704888d05..3f046607d 100644
--- a/man/details_discrim_linear_MASS.Rd
+++ b/man/details_discrim_linear_MASS.Rd
@@ -55,8 +55,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_discrim_linear_sparsediscrim.Rd b/man/details_discrim_linear_sparsediscrim.Rd
index fb0e1cbee..3e1c2eed6 100644
--- a/man/details_discrim_linear_sparsediscrim.Rd
+++ b/man/details_discrim_linear_sparsediscrim.Rd
@@ -21,8 +21,7 @@ default: ‘diagonal’)
The possible values of this parameter, and the functions that they
execute, are:
\itemize{
-\item \code{"diagonal"}:
-\code{\link[sparsediscrim:lda_diag]{sparsediscrim::lda_diag()}}
+\item \code{"diagonal"}: \code{\link[sparsediscrim:lda_diag]{sparsediscrim::lda_diag()}}
\item \code{"min_distance"}:
\code{\link[sparsediscrim:lda_emp_bayes_eigen]{sparsediscrim::lda_emp_bayes_eigen()}}
\item \code{"shrink_mean"}:
@@ -85,8 +84,8 @@ Volume 28, Issue 4, 15 February 2012, Pages 531-537.
\item \code{lda_shrink_cov()}: Pang, Tong and Zhao (2009), Shrinkage-based
Diagonal Discriminant Analysis and Its Applications in
High-Dimensional Data. \emph{Biometrics}, 65, 1021-1029.
-\item \code{lda_emp_bayes_eigen()}: Srivistava and Kubokawa (2007), Comparison
-of Discrimination Methods for High Dimensional Data, \emph{Journal of the
+\item \code{lda_emp_bayes_eigen()}: Srivastava and Kubokawa (2007), Comparison of
+Discrimination Methods for High Dimensional Data, \emph{Journal of the
Japan Statistical Society}, 37:1, 123-134.
}
}
diff --git a/man/details_discrim_quad_MASS.Rd b/man/details_discrim_quad_MASS.Rd
index 2b83b92c5..1deb9a025 100644
--- a/man/details_discrim_quad_MASS.Rd
+++ b/man/details_discrim_quad_MASS.Rd
@@ -56,8 +56,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_discrim_quad_sparsediscrim.Rd b/man/details_discrim_quad_sparsediscrim.Rd
index 962ee6a8c..9d588c471 100644
--- a/man/details_discrim_quad_sparsediscrim.Rd
+++ b/man/details_discrim_quad_sparsediscrim.Rd
@@ -21,8 +21,7 @@ default: ‘diagonal’)
The possible values of this parameter, and the functions that they
execute, are:
\itemize{
-\item \code{"diagonal"}:
-\code{\link[sparsediscrim:qda_diag]{sparsediscrim::qda_diag()}}
+\item \code{"diagonal"}: \code{\link[sparsediscrim:qda_diag]{sparsediscrim::qda_diag()}}
\item \code{"shrink_mean"}:
\code{\link[sparsediscrim:qda_shrink_mean]{sparsediscrim::qda_shrink_mean()}}
\item \code{"shrink_cov"}:
diff --git a/man/details_discrim_regularized_klaR.Rd b/man/details_discrim_regularized_klaR.Rd
index fb946b0d1..a3c153d22 100644
--- a/man/details_discrim_regularized_klaR.Rd
+++ b/man/details_discrim_regularized_klaR.Rd
@@ -24,8 +24,8 @@ default: (see below))
Some special cases for the RDA model:
\itemize{
-\item \code{frac_identity = 0} and \code{frac_common_cov = 1} is a linear
-discriminant analysis (LDA) model.
+\item \code{frac_identity = 0} and \code{frac_common_cov = 1} is a linear discriminant
+analysis (LDA) model.
\item \code{frac_identity = 0} and \code{frac_common_cov = 0} is a quadratic
discriminant analysis (QDA) model.
}
@@ -76,10 +76,9 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Friedman, J (1989). Regularized Discriminant Analysis. \emph{Journal of
-the American Statistical Association}, 84, 165-175.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Friedman, J (1989). Regularized Discriminant Analysis. \emph{Journal of the
+American Statistical Association}, 84, 165-175.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_gen_additive_mod_mgcv.Rd b/man/details_gen_additive_mod_mgcv.Rd
index 6b6408577..5c34ca1f0 100644
--- a/man/details_gen_additive_mod_mgcv.Rd
+++ b/man/details_gen_additive_mod_mgcv.Rd
@@ -14,8 +14,7 @@ For this engine, there are multiple modes: regression and classification
This model has 2 tuning parameters:
\itemize{
\item \code{select_features}: Select Features? (type: logical, default: FALSE)
-\item \code{adjust_deg_free}: Smoothness Adjustment (type: double, default:
-1.0)
+\item \code{adjust_deg_free}: Smoothness Adjustment (type: double, default: 1.0)
}
}
@@ -150,8 +149,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
\item Ross, W. 2021. \href{https://noamross.github.io/gams-in-r-course/}{\emph{Generalized Additive Models in R: A Free, Interactive Course using mgcv}}
-\item Wood, S. 2017. \emph{Generalized Additive Models: An Introduction with
-R}. Chapman and Hall/CRC.
+\item Wood, S. 2017. \emph{Generalized Additive Models: An Introduction with R}.
+Chapman and Hall/CRC.
}
}
}
diff --git a/man/details_linear_reg_brulee.Rd b/man/details_linear_reg_brulee.Rd
index 82780893d..b97c95185 100644
--- a/man/details_linear_reg_brulee.Rd
+++ b/man/details_linear_reg_brulee.Rd
@@ -34,8 +34,8 @@ process.
during optimization (\code{optimizer = "SGD"} only).
\item \code{batch_size()}: An integer for the number of training set points in
each batch.
-\item \code{stop_iter()}: A non-negative integer for how many iterations with
-no improvement before stopping. (default: 5L).
+\item \code{stop_iter()}: A non-negative integer for the number of iterations with
+no improvement before stopping (default: 5L).
}
}
@@ -78,8 +78,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
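A sketch of passing the engine arguments listed above through `set_engine()` (assumes brulee and torch are installed; values are illustrative):

    library(parsnip)

    linear_reg(penalty = 0.01) %>%
      set_engine("brulee",
                 optimizer = "SGD", momentum = 0.9,
                 batch_size = 64, stop_iter = 10)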
diff --git a/man/details_linear_reg_glm.Rd b/man/details_linear_reg_glm.Rd
index 3987a5c68..e6ff7cf6c 100644
--- a/man/details_linear_reg_glm.Rd
+++ b/man/details_linear_reg_glm.Rd
@@ -90,8 +90,7 @@ for \code{linear_reg()} with the \code{"glm"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_linear_reg_glmnet.Rd b/man/details_linear_reg_glmnet.Rd
index 5f6513ec5..7b47737d7 100644
--- a/man/details_linear_reg_glmnet.Rd
+++ b/man/details_linear_reg_glmnet.Rd
@@ -12,8 +12,7 @@ For this engine, there is a single mode: regression
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0)
}
@@ -78,10 +77,9 @@ for \code{linear_reg()} with the \code{"glmnet"} engine.
\subsection{References}{
\itemize{
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
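For reference, the two tuning parameters above map directly onto the spec; a small sketch (assumes glmnet is installed):

    library(parsnip)

    # A fixed penalty is required at prediction time with glmnet;
    # mixture = 0.5 is an elastic net between ridge and lasso
    linear_reg(penalty = 0.01, mixture = 0.5) %>%
      set_engine("glmnet")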
diff --git a/man/details_linear_reg_gls.Rd b/man/details_linear_reg_gls.Rd
index 7609e9e15..1757de712 100644
--- a/man/details_linear_reg_gls.Rd
+++ b/man/details_linear_reg_gls.Rd
@@ -109,8 +109,8 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and
-S-PLUS}. Springer, New York, NY
+\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY
}
}
}
diff --git a/man/details_linear_reg_h2o.Rd b/man/details_linear_reg_h2o.Rd
index 1f5588f28..f5fb3c52b 100644
--- a/man/details_linear_reg_h2o.Rd
+++ b/man/details_linear_reg_h2o.Rd
@@ -14,8 +14,7 @@ This model has 2 tuning parameters:
\itemize{
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see
below)
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
}
By default, when not given a fixed \code{penalty},
diff --git a/man/details_linear_reg_keras.Rd b/man/details_linear_reg_keras.Rd
index bbef52386..9142acf7c 100644
--- a/man/details_linear_reg_keras.Rd
+++ b/man/details_linear_reg_keras.Rd
@@ -69,8 +69,8 @@ for \code{linear_reg()} with the \code{"keras"} engine.
\subsection{References}{
\itemize{
-\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased
-Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
+\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation
+for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
}
}
}
diff --git a/man/details_linear_reg_lm.Rd b/man/details_linear_reg_lm.Rd
index 515f955ea..b2d0c933c 100644
--- a/man/details_linear_reg_lm.Rd
+++ b/man/details_linear_reg_lm.Rd
@@ -73,8 +73,7 @@ for \code{linear_reg()} with the \code{"lm"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_linear_reg_lme.Rd b/man/details_linear_reg_lme.Rd
index ffa2c41db..3c9c70096 100644
--- a/man/details_linear_reg_lme.Rd
+++ b/man/details_linear_reg_lme.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
 \if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}

-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
@@ -117,8 +117,8 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and
-S-PLUS}. Springer, New York, NY
+\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY
\item West, B, K Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A
Practical Guide Using Statistical Software}. CRC Press.
\item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for
@@ -128,9 +128,9 @@ Science}, Volume 72, Issue 5, Pages 1245–1256.
Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief
introduction to mixed effects modelling and multi-model inference in
ecology}. PeerJ 6:e4794.
-\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through
-Data Simulation. 2021. \emph{Advances in Methods and Practices in
-Psychological Science}.
+\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data
+Simulation. 2021. \emph{Advances in Methods and Practices in Psychological
+Science}.
}
}
}
diff --git a/man/details_linear_reg_lmer.Rd b/man/details_linear_reg_lmer.Rd
index c204bfce3..0441f464a 100644
--- a/man/details_linear_reg_lmer.Rd
+++ b/man/details_linear_reg_lmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
 \if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}

-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
@@ -120,8 +120,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
-\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and
-S-PLUS}. Springer, New York, NY
+\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY
\item West, B, K Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A
Practical Guide Using Statistical Software}. CRC Press.
\item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for
@@ -131,9 +131,9 @@ Science}, Volume 72, Issue 5, Pages 1245–1256.
Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief
introduction to mixed effects modelling and multi-model inference in
ecology}. PeerJ 6:e4794.
-\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through
-Data Simulation. 2021. \emph{Advances in Methods and Practices in
-Psychological Science}.
+\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data
+Simulation. 2021. \emph{Advances in Methods and Practices in Psychological
+Science}.
}
}
}
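The random-intercept predictor above corresponds to an lme4-style formula at fit time; a sketch (assumes multilevelmod and lme4; `sleepstudy` is illustrative):

    library(parsnip)
    library(multilevelmod)

    lmer_spec <- linear_reg() %>% set_engine("lmer")

    # (1 | Subject) adds the per-subject random intercept b_{0i}
    fit(lmer_spec, Reaction ~ Days + (1 | Subject),
        data = lme4::sleepstudy)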
diff --git a/man/details_linear_reg_spark.Rd b/man/details_linear_reg_spark.Rd
index 2133218c7..d19c16e7f 100644
--- a/man/details_linear_reg_spark.Rd
+++ b/man/details_linear_reg_spark.Rd
@@ -23,8 +23,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -84,15 +84,14 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
@@ -100,10 +99,9 @@ object.
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_linear_reg_stan.Rd b/man/details_linear_reg_stan.Rd
index 98652904f..fa4939331 100644
--- a/man/details_linear_reg_stan.Rd
+++ b/man/details_linear_reg_stan.Rd
@@ -22,12 +22,11 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients. The \code{"stan"} engine does not fit any
-hierarchical terms. See the \code{"stan_glmer"} engine from the
-multilevelmod package for that type of model.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients. The \code{"stan"} engine does not fit any hierarchical terms.
+See the \code{"stan_glmer"} engine from the multilevelmod package for that
+type of model.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
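A sketch of supplying the rstanarm controls listed above via `set_engine()` (assumes rstanarm is installed; values are illustrative):

    library(parsnip)

    linear_reg() %>%
      set_engine("stan",
                 chains = 4, iter = 2000, seed = 123,
                 prior = rstanarm::normal(0, 3),
                 prior_intercept = rstanarm::normal(0, 10))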
diff --git a/man/details_linear_reg_stan_glmer.Rd b/man/details_linear_reg_stan_glmer.Rd
index ccdd56bb5..3bcb67ddf 100644
--- a/man/details_linear_reg_stan_glmer.Rd
+++ b/man/details_linear_reg_stan_glmer.Rd
@@ -23,10 +23,9 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
@@ -65,7 +64,7 @@ linear predictor (\verb{\eta}) for a random intercept:
 \if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}

-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
diff --git a/man/details_logistic_reg_LiblineaR.Rd b/man/details_logistic_reg_LiblineaR.Rd
index e6d10eef2..5aeaa83d7 100644
--- a/man/details_logistic_reg_LiblineaR.Rd
+++ b/man/details_logistic_reg_LiblineaR.Rd
@@ -14,8 +14,7 @@ For this engine, there is a single mode: classification
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 0)
}
@@ -71,10 +70,9 @@ for \code{logistic_reg()} with the \code{"LiblineaR"} engine.
\subsection{References}{
\itemize{
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_logistic_reg_brulee.Rd b/man/details_logistic_reg_brulee.Rd
index 0a688aa16..5a7ceba30 100644
--- a/man/details_logistic_reg_brulee.Rd
+++ b/man/details_logistic_reg_brulee.Rd
@@ -35,8 +35,8 @@ process.
during optimization (\code{optimizer = "SGD"} only).
\item \code{batch_size()}: An integer for the number of training set points in
each batch.
-\item \code{stop_iter()}: A non-negative integer for how many iterations with
-no improvement before stopping. (default: 5L).
+\item \code{stop_iter()}: A non-negative integer for the number of iterations with
+no improvement before stopping (default: 5L).
\item \code{class_weights()}: Numeric class weights. See
\code{\link[brulee:brulee_logistic_reg]{brulee::brulee_logistic_reg()}}.
}
@@ -78,8 +78,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_logistic_reg_glm.Rd b/man/details_logistic_reg_glm.Rd
index 5f2aa99b1..0c5c69180 100644
--- a/man/details_logistic_reg_glm.Rd
+++ b/man/details_logistic_reg_glm.Rd
@@ -90,8 +90,7 @@ for \code{logistic_reg()} with the \code{"glm"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_logistic_reg_glmer.Rd b/man/details_logistic_reg_glmer.Rd
index fcdd796ed..b848df19c 100644
--- a/man/details_logistic_reg_glmer.Rd
+++ b/man/details_logistic_reg_glmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
 \if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}

-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
@@ -120,8 +120,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
-\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and
-S-PLUS}. Springer, New York, NY
+\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY
\item West, B, K Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A
Practical Guide Using Statistical Software}. CRC Press.
\item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for
@@ -131,9 +131,9 @@ Science}, Volume 72, Issue 5, Pages 1245–1256.
Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief
introduction to mixed effects modelling and multi-model inference in
ecology}. PeerJ 6:e4794.
-\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through
-Data Simulation. 2021. \emph{Advances in Methods and Practices in
-Psychological Science}.
+\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data
+Simulation. 2021. \emph{Advances in Methods and Practices in Psychological
+Science}.
}
}
}
diff --git a/man/details_logistic_reg_glmnet.Rd b/man/details_logistic_reg_glmnet.Rd
index 8793c642f..7caf11fbc 100644
--- a/man/details_logistic_reg_glmnet.Rd
+++ b/man/details_logistic_reg_glmnet.Rd
@@ -14,8 +14,7 @@ For this engine, there is a single mode: classification
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0)
}
@@ -25,8 +24,8 @@ see \link{glmnet-details}. As for \code{mixture}:
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -83,10 +82,9 @@ for \code{logistic_reg()} with the \code{"glmnet"} engine.
\subsection{References}{
\itemize{
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_logistic_reg_h2o.Rd b/man/details_logistic_reg_h2o.Rd
index d7b355601..0c235a75c 100644
--- a/man/details_logistic_reg_h2o.Rd
+++ b/man/details_logistic_reg_h2o.Rd
@@ -16,8 +16,7 @@ This model has 2 tuning parameters:
\itemize{
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see
below)
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
}
By default, when not given a fixed \code{penalty},
diff --git a/man/details_logistic_reg_keras.Rd b/man/details_logistic_reg_keras.Rd
index 78c31e115..e1713fe15 100644
--- a/man/details_logistic_reg_keras.Rd
+++ b/man/details_logistic_reg_keras.Rd
@@ -71,8 +71,8 @@ for \code{logistic_reg()} with the \code{"keras"} engine.
\subsection{References}{
\itemize{
-\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased
-Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
+\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation
+for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
}
}
}
diff --git a/man/details_logistic_reg_spark.Rd b/man/details_logistic_reg_spark.Rd
index 275b5155d..bf8d9696e 100644
--- a/man/details_logistic_reg_spark.Rd
+++ b/man/details_logistic_reg_spark.Rd
@@ -24,8 +24,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -86,15 +86,14 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables, so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
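
The serialization note above can be sketched as follows; this assumes an
open Spark connection `sc` and a fitted parsnip object `spark_fit`, both
hypothetical names:

```r
library(sparklyr)

# Save the Spark-side model separately from the parsnip wrapper
ml_save(spark_fit$fit, "path/to/model", overwrite = TRUE)

# In a new R session: reload the Spark model and reattach it to the
# (re-loaded) parsnip object
reloaded <- ml_load(sc, "path/to/model")
spark_fit$fit <- reloaded
```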
@@ -102,10 +101,9 @@ object.
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_logistic_reg_stan.Rd b/man/details_logistic_reg_stan.Rd
index a4268b0ef..d50263b14 100644
--- a/man/details_logistic_reg_stan.Rd
+++ b/man/details_logistic_reg_stan.Rd
@@ -24,11 +24,10 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients. This \code{"stan"} engine does not fit any
-hierarchical terms.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients. This \code{"stan"} engine does not fit any hierarchical
+terms.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
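
A hedged sketch of passing these sampler arguments through `set_engine()`;
the specific values and prior choices are illustrative only:

```r
library(parsnip)

spec <- logistic_reg() %>%
  set_engine(
    "stan",
    chains = 4,                               # number of Markov chains
    iter   = 2000,                            # iterations per chain
    seed   = 123,
    cores  = 2,                               # chains run in parallel
    prior  = rstanarm::normal(0, 1),          # prior on coefficients
    prior_intercept = rstanarm::student_t(7)  # prior on the intercept
  )
```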
diff --git a/man/details_logistic_reg_stan_glmer.Rd b/man/details_logistic_reg_stan_glmer.Rd
index 0dc528dc5..ce1281501 100644
--- a/man/details_logistic_reg_stan_glmer.Rd
+++ b/man/details_logistic_reg_stan_glmer.Rd
@@ -23,10 +23,9 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
@@ -64,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
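
In code, a random-intercept specification matching this linear predictor
might look like the following sketch; the engine comes from the
multilevelmod extension, and `outcome`, `x1`, `subject`, and `dat` are
hypothetical names:

```r
library(parsnip)
library(multilevelmod)  # registers the "stan_glmer" engine

spec <- logistic_reg() %>%
  set_engine("stan_glmer")

# (1 | subject) adds the per-subject intercept b_{0i}; the outcome is
# assumed to be a two-level factor
fit(spec, outcome ~ x1 + (1 | subject), data = dat)
```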
diff --git a/man/details_mars_earth.Rd b/man/details_mars_earth.Rd
index 598add896..0ef130268 100644
--- a/man/details_mars_earth.Rd
+++ b/man/details_mars_earth.Rd
@@ -16,8 +16,7 @@ This model has 3 tuning parameters:
\itemize{
\item \code{num_terms}: # Model Terms (type: integer, default: see below)
\item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L)
-\item \code{prune_method}: Pruning Method (type: character, default:
-‘backward’)
+\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’)
}
The default value of \code{num_terms} depends on the number of predictor
@@ -110,8 +109,7 @@ for \code{mars()} with the \code{"earth"} engine.
\item Friedman, J. 1991. “Multivariate Adaptive Regression Splines.” \emph{The
Annals of Statistics}, vol. 19, no. 1, pp. 1-67.
\item Milborrow, S. \href{http://www.milbo.org/doc/earth-notes.pdf}{“Notes on the earth package.”}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_mlp_brulee.Rd b/man/details_mlp_brulee.Rd
index 015815076..252d69a8c 100644
--- a/man/details_mlp_brulee.Rd
+++ b/man/details_mlp_brulee.Rd
@@ -36,8 +36,8 @@ during optimization.
each batch.
\item \code{class_weights()}: Numeric class weights. See
\code{\link[brulee:brulee_mlp]{brulee::brulee_mlp()}}.
-\item \code{stop_iter()}: A non-negative integer for how many iterations with
-no improvement before stopping. (default: 5L).
+\item \code{stop_iter()}: A non-negative integer for how many iterations with no
+improvement before stopping (default: 5L).
}
}
@@ -131,8 +131,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_mlp_h2o.Rd b/man/details_mlp_h2o.Rd
index 223fe18fd..9cf8c8495 100644
--- a/man/details_mlp_h2o.Rd
+++ b/man/details_mlp_h2o.Rd
@@ -34,20 +34,19 @@ specifying the l1 penalty directly with the engine argument \code{l1}.
Other engine arguments of interest:
\itemize{
\item \code{stopping_rounds} controls early stopping rounds based on the
-convergence of another engine parameter \code{stopping_metric}. By
-default, \link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} stops
-training if simple moving average of length 5 of the stopping_metric
-does not improve for 5 scoring events. This is mostly useful when
-used alongside the engine parameter \code{validation}, which is the
-\strong{proportion} of train-validation split, parsnip will split and
-pass the two data frames to h2o. Then
+convergence of another engine parameter \code{stopping_metric}. By default,
+\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} stops training if the
+simple moving average of length 5 of the \code{stopping_metric} does not
+improve for 5 scoring events. This is most useful alongside the engine
+parameter \code{validation}, the \strong{proportion} of the data held out
+for a train-validation split; parsnip will split and pass the two data
+frames to h2o. Then
\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} will evaluate the
metric and early stopping criteria on the validation set.
-\item h2o uses a 50\% dropout ratio controlled by \code{dropout} for hidden
-layers by default.
-\code{\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning()}} provides an
-engine argument \code{input_dropout_ratio} for dropout ratios in the
-input layer, which defaults to 0.
+\item h2o uses a 50\% dropout ratio controlled by \code{dropout} for hidden layers
+by default. \code{\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning()}}
+provides an engine argument \code{input_dropout_ratio} for dropout ratios
+in the input layer, which defaults to 0.
}
}
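
Tying these engine arguments together, a hedged sketch (the agua extension
registers the "h2o" engine; all values are placeholders, not defaults):

```r
library(parsnip)
library(agua)  # registers the "h2o" engine

spec <- mlp(hidden_units = 32) %>%
  set_engine(
    "h2o",
    validation      = 0.1,        # hold out 10% for the validation split
    stopping_rounds = 5,          # early-stopping window
    stopping_metric = "logloss"   # metric monitored on the validation set
  ) %>%
  set_mode("classification")
```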
diff --git a/man/details_mlp_keras.Rd b/man/details_mlp_keras.Rd
index cfc0e103e..b67c46b2f 100644
--- a/man/details_mlp_keras.Rd
+++ b/man/details_mlp_keras.Rd
@@ -111,8 +111,7 @@ for \code{mlp()} with the \code{"keras"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_mlp_nnet.Rd b/man/details_mlp_nnet.Rd
index 623a8f954..875ec5481 100644
--- a/man/details_mlp_nnet.Rd
+++ b/man/details_mlp_nnet.Rd
@@ -106,8 +106,7 @@ for \code{mlp()} with the \code{"nnet"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_multinom_reg_brulee.Rd b/man/details_multinom_reg_brulee.Rd
index 5285ca592..8066cfffb 100644
--- a/man/details_multinom_reg_brulee.Rd
+++ b/man/details_multinom_reg_brulee.Rd
@@ -34,8 +34,8 @@ process.
during optimization (\code{optimizer = "SGD"} only).
\item \code{batch_size()}: An integer for the number of training set points in
each batch.
-\item \code{stop_iter()}: A non-negative integer for how many iterations with
-no improvement before stopping. (default: 5L).
+\item \code{stop_iter()}: A non-negative integer for how many iterations with no
+improvement before stopping (default: 5L).
\item \code{class_weights()}: Numeric class weights. See
\code{\link[brulee:brulee_multinomial_reg]{brulee::brulee_multinomial_reg()}}.
}
@@ -77,8 +77,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_multinom_reg_glmnet.Rd b/man/details_multinom_reg_glmnet.Rd
index b51a7ca50..0940806e3 100644
--- a/man/details_multinom_reg_glmnet.Rd
+++ b/man/details_multinom_reg_glmnet.Rd
@@ -13,8 +13,7 @@ For this engine, there is a single mode: classification
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0)
}
@@ -24,8 +23,8 @@ see \link{glmnet-details}. As for \code{mixture}:
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -82,10 +81,9 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_multinom_reg_h2o.Rd b/man/details_multinom_reg_h2o.Rd
index 265ef2896..ad1fd40c4 100644
--- a/man/details_multinom_reg_h2o.Rd
+++ b/man/details_multinom_reg_h2o.Rd
@@ -15,8 +15,7 @@ This model has 2 tuning parameters:
\itemize{
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see
below)
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
}
By default, when not given a fixed \code{penalty},
diff --git a/man/details_multinom_reg_keras.Rd b/man/details_multinom_reg_keras.Rd
index f2e8d00d5..7ca336e94 100644
--- a/man/details_multinom_reg_keras.Rd
+++ b/man/details_multinom_reg_keras.Rd
@@ -70,8 +70,8 @@ for \code{multinom_reg()} with the \code{"keras"} engine.
\subsection{References}{
\itemize{
-\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased
-Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
+\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation
+for Nonorthogonal Problems}. Technometrics, 42(1), 80-86.
}
}
}
diff --git a/man/details_multinom_reg_nnet.Rd b/man/details_multinom_reg_nnet.Rd
index 1d7ebf14c..aa7bdf58b 100644
--- a/man/details_multinom_reg_nnet.Rd
+++ b/man/details_multinom_reg_nnet.Rd
@@ -68,10 +68,9 @@ The underlying model implementation does not allow for case weights.
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_multinom_reg_spark.Rd b/man/details_multinom_reg_spark.Rd
index ff7f9a1e7..5ab0475fa 100644
--- a/man/details_multinom_reg_spark.Rd
+++ b/man/details_multinom_reg_spark.Rd
@@ -23,8 +23,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -85,15 +85,14 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables, so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
@@ -101,10 +100,9 @@ object.
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
-\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical
-Learning with Sparsity}. CRC Press.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning
+with Sparsity}. CRC Press.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_naive_Bayes_h2o.Rd b/man/details_naive_Bayes_h2o.Rd
index 1795308f6..f4604bd93 100644
--- a/man/details_naive_Bayes_h2o.Rd
+++ b/man/details_naive_Bayes_h2o.Rd
@@ -19,10 +19,10 @@ This model has 1 tuning parameter:
\code{\link[h2o:h2o.naiveBayes]{h2o::h2o.naiveBayes()}} provides several engine
arguments to deal with imbalances and rare classes:
\itemize{
-\item \code{balance_classes} A logical value controlling over/under-sampling
-(for imbalanced data). Defaults to \code{FALSE}.
-\item \code{class_sampling_factors} The over/under-sampling ratios per class
-(in lexicographic order). If not specified, sampling factors will be
+\item \code{balance_classes} A logical value controlling over/under-sampling (for
+imbalanced data). Defaults to \code{FALSE}.
+\item \code{class_sampling_factors} The over/under-sampling ratios per class (in
+lexicographic order). If not specified, sampling factors will be
automatically computed to obtain class balance during training.
Requires \code{balance_classes} to be \code{TRUE}.
\item \code{min_sdev}: The minimum standard deviation to use for observations
diff --git a/man/details_naive_Bayes_klaR.Rd b/man/details_naive_Bayes_klaR.Rd
index 9fa60decc..c9e3dd4d1 100644
--- a/man/details_naive_Bayes_klaR.Rd
+++ b/man/details_naive_Bayes_klaR.Rd
@@ -65,8 +65,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_naive_Bayes_naivebayes.Rd b/man/details_naive_Bayes_naivebayes.Rd
index ad99f199e..8df46db63 100644
--- a/man/details_naive_Bayes_naivebayes.Rd
+++ b/man/details_naive_Bayes_naivebayes.Rd
@@ -68,8 +68,7 @@ The underlying model implementation does not allow for case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_nearest_neighbor_kknn.Rd b/man/details_nearest_neighbor_kknn.Rd
index 64911693e..40ea584dc 100644
--- a/man/details_nearest_neighbor_kknn.Rd
+++ b/man/details_nearest_neighbor_kknn.Rd
@@ -14,8 +14,8 @@ For this engine, there are multiple modes: classification and regression
This model has 3 tuning parameters:
\itemize{
\item \code{neighbors}: # Nearest Neighbors (type: integer, default: 5L)
-\item \code{weight_func}: Distance Weighting Function (type: character,
-default: ‘optimal’)
+\item \code{weight_func}: Distance Weighting Function (type: character, default:
+‘optimal’)
\item \code{dist_power}: Minkowski Distance Order (type: double, default: 2.0)
}
}
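
For reference, a minimal specification using the three tuning parameters
listed above (the values are arbitrary placeholders):

```r
library(parsnip)

spec <- nearest_neighbor(
    neighbors   = 5,
    weight_func = "triangular",
    dist_power  = 2
  ) %>%
  set_engine("kknn") %>%
  set_mode("classification")

# Show how parsnip maps these arguments onto kknn
translate(spec)
```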
@@ -105,8 +105,7 @@ The underlying model implementation does not allow for case weights.
\itemize{
\item Hechenbichler K. and Schliep K.P. (2004) \href{https://epub.ub.uni-muenchen.de/1769/}{Weighted k-Nearest-Neighbor Techniques and Ordinal Classification}, Discussion
Paper 399, SFB 386, Ludwig-Maximilians University Munich
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_pls_mixOmics.Rd b/man/details_pls_mixOmics.Rd
index 7b1a1bd1a..439d20695 100644
--- a/man/details_pls_mixOmics.Rd
+++ b/man/details_pls_mixOmics.Rd
@@ -12,8 +12,8 @@ For this engine, there are multiple modes: classification and regression
This model has 2 tuning parameters:
\itemize{
-\item \code{predictor_prop}: Proportion of Predictors (type: double, default:
-see below)
+\item \code{predictor_prop}: Proportion of Predictors (type: double, default: see
+below)
\item \code{num_comp}: # Components (type: integer, default: 2L)
}
}
@@ -46,8 +46,7 @@ pls(num_comp = integer(1), predictor_prop = double(1)) \%>\%
\code{\link[plsmod:pls_fit]{plsmod::pls_fit()}} is a function that:
\itemize{
\item Determines the number of predictors in the data.
-\item Adjusts \code{num_comp} if the value is larger than the number of
-factors.
+\item Adjusts \code{num_comp} if the value is larger than the number of factors.
\item Determines whether sparsity is required based on the value of
\code{predictor_prop}.
\item Sets the \code{keepX} argument of \code{mixOmics::spls()} for sparse models.
diff --git a/man/details_poisson_reg_glmer.Rd b/man/details_poisson_reg_glmer.Rd
index 452148326..5a32c17bd 100644
--- a/man/details_poisson_reg_glmer.Rd
+++ b/man/details_poisson_reg_glmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
@@ -119,8 +119,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
-\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and
-S-PLUS}. Springer, New York, NY
+\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY
\item West, B, K Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A
Practical Guide Using Statistical Software}. CRC Press.
\item Thorson, J, Minto, C. 2015. Mixed effects: a unifying framework for
@@ -130,9 +130,9 @@ Science}, Volume 72, Issue 5, Pages 1245–1256.
Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief
introduction to mixed effects modelling and multi-model inference in
ecology}. PeerJ 6:e4794.
-\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through
-Data Simulation. 2021. \emph{Advances in Methods and Practices in
-Psychological Science}.
+\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data
+Simulation. 2021. \emph{Advances in Methods and Practices in Psychological
+Science}.
}
}
}
diff --git a/man/details_poisson_reg_glmnet.Rd b/man/details_poisson_reg_glmnet.Rd
index 1c621d0b1..9250dc70c 100644
--- a/man/details_poisson_reg_glmnet.Rd
+++ b/man/details_poisson_reg_glmnet.Rd
@@ -13,8 +13,7 @@ For this engine, there is a single mode: regression
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0)
}
@@ -24,8 +23,8 @@ see \link{glmnet-details}. As for \code{mixture}:
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
diff --git a/man/details_poisson_reg_h2o.Rd b/man/details_poisson_reg_h2o.Rd
index 253b305a3..0dfe52eed 100644
--- a/man/details_poisson_reg_h2o.Rd
+++ b/man/details_poisson_reg_h2o.Rd
@@ -15,8 +15,7 @@ This model has 2 tuning parameters:
\itemize{
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see
below)
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
}
By default, when not given a fixed \code{penalty},
diff --git a/man/details_poisson_reg_stan.Rd b/man/details_poisson_reg_stan.Rd
index 9c5084970..9147920d1 100644
--- a/man/details_poisson_reg_stan.Rd
+++ b/man/details_poisson_reg_stan.Rd
@@ -23,11 +23,9 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients. The \code{"stan"} engine does not fit any
-hierarchical terms.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients. The \code{"stan"} engine does not fit any hierarchical terms.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
diff --git a/man/details_poisson_reg_stan_glmer.Rd b/man/details_poisson_reg_stan_glmer.Rd
index 331251337..ef1065ada 100644
--- a/man/details_poisson_reg_stan_glmer.Rd
+++ b/man/details_poisson_reg_stan_glmer.Rd
@@ -23,10 +23,9 @@ The default is 4.
\item \code{iter}: A positive integer specifying the number of iterations for
each chain (including warmup). The default is 2000.
\item \code{seed}: The seed for random number generation.
-\item \code{cores}: Number of cores to use when executing the chains in
-parallel.
-\item \code{prior}: The prior distribution for the (non-hierarchical)
-regression coefficients.
+\item \code{cores}: Number of cores to use when executing the chains in parallel.
+\item \code{prior}: The prior distribution for the (non-hierarchical) regression
+coefficients.
\item \code{prior_intercept}: The prior distribution for the intercept (after
centering all predictors).
}
@@ -64,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where \emph{i} denotes the \code{i}th independent experimental unit
+where \eqn{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subject’s results.
diff --git a/man/details_proportional_hazards_glmnet.Rd b/man/details_proportional_hazards_glmnet.Rd
index 084d2709a..1f0db1aac 100644
--- a/man/details_proportional_hazards_glmnet.Rd
+++ b/man/details_proportional_hazards_glmnet.Rd
@@ -12,8 +12,7 @@ For this engine, there is a single mode: censored regression
This model has 2 tuning parameters:
\itemize{
-\item \code{penalty}: Amount of Regularization (type: double, default: see
-below)
+\item \code{penalty}: Amount of Regularization (type: double, default: see below)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0)
}
@@ -24,8 +23,8 @@ see \link{glmnet-details}. As for
\itemize{
\item \code{mixture = 1} specifies a pure lasso model,
\item \code{mixture = 0} specifies a ridge regression model, and
-\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating
-lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso
+and ridge.
}
}
@@ -151,8 +150,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\item Simon N, Friedman J, Hastie T, Tibshirani R. 2011. “Regularization
Paths for Cox’s Proportional Hazards Model via Coordinate Descent.”
\emph{Journal of Statistical Software}, Articles 39 (5): 1–13.
-\item Hastie T, Tibshirani R, Wainwright M. 2015. \emph{Statistical Learning
-with Sparsity}. CRC Press.
+\item Hastie T, Tibshirani R, Wainwright M. 2015. \emph{Statistical Learning with
+Sparsity}. CRC Press.
\item Kuhn M, Johnson K. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
diff --git a/man/details_proportional_hazards_survival.Rd b/man/details_proportional_hazards_survival.Rd
index 4a5f4c251..1e6cb151f 100644
--- a/man/details_proportional_hazards_survival.Rd
+++ b/man/details_proportional_hazards_survival.Rd
@@ -129,8 +129,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
\item Andersen P, Gill R. 1982. Cox’s regression model for counting
-processes, a large sample study. \emph{Annals of Statistics} 10,
-1100-1120.
+processes, a large sample study. \emph{Annals of Statistics} 10, 1100-1120.
}
}
}
diff --git a/man/details_rand_forest_partykit.Rd b/man/details_rand_forest_partykit.Rd
index 9e00500b1..25184df2e 100644
--- a/man/details_rand_forest_partykit.Rd
+++ b/man/details_rand_forest_partykit.Rd
@@ -109,8 +109,7 @@ time.
\subsection{References}{
\itemize{
\item \href{https://jmlr.org/papers/v16/hothorn15a.html}{partykit: A Modular Toolkit for Recursive Partytioning in R}
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_rand_forest_randomForest.Rd b/man/details_rand_forest_randomForest.Rd
index 5f5271765..ff899969b 100644
--- a/man/details_rand_forest_randomForest.Rd
+++ b/man/details_rand_forest_randomForest.Rd
@@ -105,8 +105,7 @@ for \code{rand_forest()} with the \code{"randomForest"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_rand_forest_ranger.Rd b/man/details_rand_forest_ranger.Rd
index e3cefb47e..d38389e36 100644
--- a/man/details_rand_forest_ranger.Rd
+++ b/man/details_rand_forest_ranger.Rd
@@ -132,8 +132,7 @@ for \code{rand_forest()} with the \code{"ranger"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_rand_forest_spark.Rd b/man/details_rand_forest_spark.Rd
index 4a791bb6b..8fdc04559 100644
--- a/man/details_rand_forest_spark.Rd
+++ b/man/details_rand_forest_spark.Rd
@@ -102,15 +102,14 @@ to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names
-will be the same as documented but without the dots.
+\item The predictions will always be in a Spark table format. The names will
+be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables, so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
-\code{ml_save(object$fit)} and separately saved to disk. In a new
-session, the object can be reloaded and reattached to the parsnip
-object.
+\code{ml_save(object$fit)} and separately saved to disk. In a new session,
+the object can be reloaded and reattached to the parsnip object.
}
}
@@ -129,8 +128,7 @@ a character string to specify the column with the numeric case weights.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_rule_fit_h2o.Rd b/man/details_rule_fit_h2o.Rd
index 456428c59..fed2f6c01 100644
--- a/man/details_rule_fit_h2o.Rd
+++ b/man/details_rule_fit_h2o.Rd
@@ -23,15 +23,15 @@ the L1 penalty (LASSO).
Other engine arguments of interest:
\itemize{
-\item \code{algorithm}: The algorithm to use to generate rules. should be one
-of “AUTO”, “DRF”, “GBM”, defaults to “AUTO”.
+\item \code{algorithm}: The algorithm used to generate rules; one of
+“AUTO”, “DRF”, or “GBM”. Defaults to “AUTO”.
\item \code{min_rule_length}: Minimum length of tree depth, opposite of
\code{tree_depth}; defaults to 3.
\item \code{max_num_rules}: The maximum number of rules to return. The default
value of -1 means the number of rules is selected by diminishing
returns in model deviance.
-\item \code{model_type}: The type of base learners in the ensemble, should be
-one of: “rules_and_linear”, “rules”, “linear”, defaults to
+\item \code{model_type}: The type of base learner in the ensemble; one of
+“rules_and_linear”, “rules”, or “linear”. Defaults to
“rules_and_linear”.
}
}
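
A sketch of supplying these engine arguments; the rules and agua extensions
are assumed, and the values are placeholders rather than recommendations:

```r
library(parsnip)
library(rules)  # rule_fit() model definition support
library(agua)   # registers the "h2o" engine

spec <- rule_fit(penalty = 0.05) %>%
  set_engine(
    "h2o",
    algorithm     = "GBM",
    max_num_rules = 50,
    model_type    = "rules_and_linear"
  ) %>%
  set_mode("classification")
```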
diff --git a/man/details_rule_fit_xrf.Rd b/man/details_rule_fit_xrf.Rd
index c4429550e..ee12446f8 100644
--- a/man/details_rule_fit_xrf.Rd
+++ b/man/details_rule_fit_xrf.Rd
@@ -20,10 +20,9 @@ default: see below)
\item \code{min_n}: Minimal Node Size (type: integer, default: 1L)
\item \code{tree_depth}: Tree Depth (type: integer, default: 6L)
\item \code{learn_rate}: Learning Rate (type: double, default: 0.3)
-\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default:
-0.0)
-\item \code{sample_size}: Proportion Observations Sampled (type: double,
-default: 1.0)
+\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0)
+\item \code{sample_size}: Proportion Observations Sampled (type: double, default:
+1.0)
\item \code{penalty}: Amount of Regularization (type: double, default: 0.1)
}
}
@@ -156,8 +155,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictor
that will be randomly sampled at each split rather than the \emph{count}. In
some settings, such as when tuning over preprocessors that influence the
number of predictors, this parameterization is quite
-helpful—interpreting \code{mtry} as a proportion means that [0,1] is always
-a valid range for that parameter, regardless of input data.
+helpful—interpreting \code{mtry} as a proportion means that [0, 1] is
+always a valid range for that parameter, regardless of input data.
parsnip and its extensions accommodate this parameterization using the
\code{counts} argument: a logical indicating whether \code{mtry} should be
@@ -174,7 +173,7 @@ to \code{TRUE}. For engines that support the proportion interpretation
(currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and
\code{"lightgbm"} via the bonsai package) the user can pass the
\code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values
-within [0,1].
+within [0, 1].
}
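
A short sketch of the \code{counts = FALSE} parameterization described
above (the \code{mtry} value is a placeholder):

```r
library(parsnip)
library(rules)  # provides the "xrf" engine for rule_fit()

# With counts = FALSE, mtry is read as a proportion in [0, 1] of the
# predictors rather than as a count
spec <- rule_fit(mtry = 0.8) %>%
  set_engine("xrf", counts = FALSE)
```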
\subsection{Early stopping}{
diff --git a/man/details_surv_reg_survival.Rd b/man/details_surv_reg_survival.Rd
index b57f851d8..91e12434d 100644
--- a/man/details_surv_reg_survival.Rd
+++ b/man/details_surv_reg_survival.Rd
@@ -81,8 +81,8 @@ surv_reg() \%>\%
\subsection{References}{
\itemize{
-\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical
-analysis of failure time data}, Wiley.
+\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical analysis
+of failure time data}, Wiley.
}
}
}
diff --git a/man/details_survival_reg_survival.Rd b/man/details_survival_reg_survival.Rd
index b9b7446f8..1ae29ac47 100644
--- a/man/details_survival_reg_survival.Rd
+++ b/man/details_survival_reg_survival.Rd
@@ -98,8 +98,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called
\subsection{References}{
\itemize{
-\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical
-analysis of failure time data}, Wiley.
+\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical analysis
+of failure time data}, Wiley.
}
}
}
diff --git a/man/details_svm_linear_LiblineaR.Rd b/man/details_svm_linear_LiblineaR.Rd
index 0d7270ff5..ac1f786c1 100644
--- a/man/details_svm_linear_LiblineaR.Rd
+++ b/man/details_svm_linear_LiblineaR.Rd
@@ -106,8 +106,7 @@ for \code{svm_linear()} with the \code{"LiblineaR"} engine.
\subsection{References}{
\itemize{
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_svm_linear_kernlab.Rd b/man/details_svm_linear_kernlab.Rd
index 747e5840e..d155180c2 100644
--- a/man/details_svm_linear_kernlab.Rd
+++ b/man/details_svm_linear_kernlab.Rd
@@ -104,11 +104,9 @@ for \code{svm_linear()} with the \code{"kernlab"} engine.
\subsection{References}{
\itemize{
\item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”}
-\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004.
-\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
+\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
Statistical Software}.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_svm_poly_kernlab.Rd b/man/details_svm_poly_kernlab.Rd
index 49cbb679d..bcc5f50f7 100644
--- a/man/details_svm_poly_kernlab.Rd
+++ b/man/details_svm_poly_kernlab.Rd
@@ -116,11 +116,9 @@ for \code{svm_poly()} with the \code{"kernlab"} engine.
\subsection{References}{
\itemize{
\item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”}
-\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004.
-\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
+\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
Statistical Software}.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/details_svm_rbf_kernlab.Rd b/man/details_svm_rbf_kernlab.Rd
index 4c610e6aa..24a075d5d 100644
--- a/man/details_svm_rbf_kernlab.Rd
+++ b/man/details_svm_rbf_kernlab.Rd
@@ -116,11 +116,9 @@ for \code{svm_rbf()} with the \code{"kernlab"} engine.
\subsection{References}{
\itemize{
\item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”}
-\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004.
-\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
+\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of
Statistical Software}.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}.
-Springer.
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
diff --git a/man/parsnip_update.Rd b/man/parsnip_update.Rd
index b6daa3e40..f95e05836 100644
--- a/man/parsnip_update.Rd
+++ b/man/parsnip_update.Rd
@@ -1,6 +1,6 @@
% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/bag_mars.R, R/bag_tree.R, R/bart.R,
-% R/boost_tree.R, R/c5_rules.R, R/cubist_rules.R, R/decision_tree.R,
+% Please edit documentation in R/bag_mars.R, R/bag_mlp.R, R/bag_tree.R,
+% R/bart.R, R/boost_tree.R, R/c5_rules.R, R/cubist_rules.R, R/decision_tree.R,
% R/discrim_flexible.R, R/discrim_linear.R, R/discrim_quad.R,
% R/discrim_regularized.R, R/gen_additive_mod.R, R/linear_reg.R,
% R/logistic_reg.R, R/mars.R, R/mlp.R, R/multinom_reg.R, R/naive_Bayes.R,
@@ -9,6 +9,7 @@
% R/svm_linear.R, R/svm_poly.R, R/svm_rbf.R, R/update.R
\name{update.bag_mars}
\alias{update.bag_mars}
+\alias{update.bag_mlp}
\alias{update.bag_tree}
\alias{update.bart}
\alias{update.boost_tree}
@@ -50,6 +51,16 @@
...
)
+\method{update}{bag_mlp}(
+ object,
+ parameters = NULL,
+ hidden_units = NULL,
+ penalty = NULL,
+ epochs = NULL,
+ fresh = FALSE,
+ ...
+)
+
\method{update}{bag_tree}(
object,
parameters = NULL,
@@ -321,6 +332,13 @@ modified in-place or replaced wholesale.}
\item{...}{Not used for \code{update()}.}
+\item{hidden_units}{An integer for the number of units in the hidden layer.}
+
+\item{penalty}{A non-negative number representing the amount of
+regularization used by some of the engines.}
+
+\item{epochs}{An integer for the number of training iterations.}
+
\item{cost_complexity}{A positive number for the cost/complexity
parameter (a.k.a. \code{Cp}) used by CART models (specific engines only).}
@@ -376,9 +394,6 @@ set instances that are used to adjust the model-based prediction.}
\item{max_rules}{The largest number of rules.}
-\item{penalty}{An non-negative number representing the amount of
-regularization used by some of the engines.}
-
\item{regularization_method}{A character string for the type of regularized
estimation. Possible values are: "\code{diagonal}", "\code{min_distance}",
"\code{shrink_cov}", and "\code{shrink_mean}" (\code{sparsediscrim} engine only).}
@@ -402,13 +417,9 @@ proportion of L1 regularization (i.e. lasso) in the model.
Available for specific engines only.}
-\item{hidden_units}{An integer for the number of units in the hidden model.}
-
\item{dropout}{A number between 0 (inclusive) and 1 denoting the proportion
of model parameters randomly set to zero during model training.}
-\item{epochs}{An integer for the number of training iterations.}
-
\item{activation}{A single character string denoting the type of relationship
between the original predictors and the hidden unit layer. The activation
function between the hidden and output layers is automatically set to either
diff --git a/man/rmd/bag_mlp_nnet.Rmd b/man/rmd/bag_mlp_nnet.Rmd
new file mode 100644
index 000000000..e9c46374b
--- /dev/null
+++ b/man/rmd/bag_mlp_nnet.Rmd
@@ -0,0 +1,74 @@
+```{r, child = "aaa.Rmd", include = FALSE}
+```
+
+`r descr_models("bag_mlp", "nnet")`
+
+## Tuning Parameters
+
+```{r nnet-param-info, echo = FALSE}
+defaults <-
+ tibble::tibble(parsnip = c("penalty", "hidden_units", "epochs"),
+ default = c("0.0", "10L", "1000L"))
+
+param <-
+ bag_mlp() %>%
+ set_engine("nnet") %>%
+ set_mode("regression") %>%
+ make_parameter_list(defaults)
+```
+
+This model has `r nrow(param)` tuning parameters:
+
+```{r nnet-param-list, echo = FALSE, results = "asis"}
+param$item
+```
+
+These defaults are set by the `baguette` package and are different from those in [nnet::nnet()].
+
+## Translation from parsnip to the original package (classification)
+
+`r uses_extension("bag_mlp", "nnet", "classification")`
+
+```{r nnet-cls}
+library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) %>%
+ set_engine("nnet") %>%
+ set_mode("classification") %>%
+ translate()
+```
+
+
+## Translation from parsnip to the original package (regression)
+
+`r uses_extension("bag_mlp", "nnet", "regression")`
+
+```{r nnet-reg}
+library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) %>%
+ set_engine("nnet") %>%
+ set_mode("regression") %>%
+ translate()
+```
+
+
+## Preprocessing requirements
+
+```{r child = "template-makes-dummies.Rmd"}
+```
+
+```{r child = "template-same-scale.Rmd"}
+```
+
+## Case weights
+
+```{r child = "template-no-case-weights.Rmd"}
+```
+
+
+## References
+
+ - Breiman, L. 1996. "Bagging predictors." *Machine Learning* 24 (2): 123-140.
+
+ - Kuhn, M, and K Johnson. 2013. *Applied Predictive Modeling*. Springer.
diff --git a/man/rmd/bag_mlp_nnet.md b/man/rmd/bag_mlp_nnet.md
new file mode 100644
index 000000000..93955cb49
--- /dev/null
+++ b/man/rmd/bag_mlp_nnet.md
@@ -0,0 +1,99 @@
+
+
+
+For this engine, there are multiple modes: classification and regression
+
+## Tuning Parameters
+
+
+
+This model has 3 tuning parameters:
+
+- `hidden_units`: # Hidden Units (type: integer, default: 10L)
+
+- `penalty`: Amount of Regularization (type: double, default: 0.0)
+
+- `epochs`: # Epochs (type: integer, default: 1000L)
+
+These defaults are set by the `baguette` package and are different from those in [nnet::nnet()].
+
+## Translation from parsnip to the original package (classification)
+
+The **baguette** extension package is required to fit this model.
+
+
+```r
+library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) %>%
+ set_engine("nnet") %>%
+ set_mode("classification") %>%
+ translate()
+```
+
+```
+## Bagged Neural Network Model Specification (classification)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+##
+## Computational engine: nnet
+##
+## Model fit template:
+## baguette::bagger(formula = missing_arg(), data = missing_arg(),
+## weights = missing_arg(), size = integer(1), decay = double(1),
+## base_model = "nnet")
+```
+
+
+## Translation from parsnip to the original package (regression)
+
+The **baguette** extension package is required to fit this model.
+
+
+```r
+library(baguette)
+
+bag_mlp(penalty = double(1), hidden_units = integer(1)) %>%
+ set_engine("nnet") %>%
+ set_mode("regression") %>%
+ translate()
+```
+
+```
+## Bagged Neural Network Model Specification (regression)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+##
+## Computational engine: nnet
+##
+## Model fit template:
+## baguette::bagger(formula = missing_arg(), data = missing_arg(),
+## weights = missing_arg(), size = integer(1), decay = double(1),
+## base_model = "nnet")
+```
+
+
+## Preprocessing requirements
+
+
+Factor/categorical predictors need to be converted to numeric values (e.g., dummy or indicator variables) for this engine. When using the formula method via \\code{\\link[=fit.model_spec]{fit()}}, parsnip will convert factor columns to indicators.
+
+
+Predictors should have the same scale. One way to achieve this is to center and
+scale each so that each predictor has mean zero and a variance of one.
+
+## Case weights
+
+
+The underlying model implementation does not allow for case weights.
+
+
+## References
+
+ - Breiman, L. 1996. "Bagging predictors." *Machine Learning* 24 (2): 123-140.
+
+ - Kuhn, M, and K Johnson. 2013. *Applied Predictive Modeling*. Springer.
diff --git a/tests/testthat/test_boost_tree_xgboost.R b/tests/testthat/test_boost_tree_xgboost.R
index 5adde2957..d6e23e8f9 100644
--- a/tests/testthat/test_boost_tree_xgboost.R
+++ b/tests/testthat/test_boost_tree_xgboost.R
@@ -600,6 +600,7 @@ test_that("count/proportion parameters", {
test_that('interface to param arguments', {
skip_if_not_installed("xgboost")
+ skip_on_os("windows") # some snapshots different on windows (added spaces)
ctrl$verbosity <- 0L