diff --git a/NAMESPACE b/NAMESPACE
index 3bd93bfeb..e43144a35 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -53,6 +53,8 @@ S3method(print,rand_forest)
 S3method(print,surv_reg)
 S3method(print,svm_poly)
 S3method(print,svm_rbf)
+S3method(req_pkgs,model_fit)
+S3method(req_pkgs,model_spec)
 S3method(tidy,model_fit)
 S3method(tidy,nullmodel)
 S3method(translate,boost_tree)
@@ -144,6 +146,7 @@ export(predict_raw)
 export(predict_raw.model_fit)
 export(rand_forest)
 export(repair_call)
+export(req_pkgs)
 export(rpart_train)
 export(set_args)
 export(set_dependency)
diff --git a/NEWS.md b/NEWS.md
index 9704be62b..e5732df0e 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -18,6 +18,10 @@
 * A function named `repair_call()` was added. This can help change the underlying model's `call` object to better reflect what would have been obtained if the model function had been used directly (instead of via `parsnip`). This is only useful when the user chooses a formula interface and the model uses a formula interface. It will also be of limited use when a recipe is used to construct the feature set in `workflows` or `tune`.

+ * The `predict()` function now checks to see if required modeling packages are installed. The packages are loaded (but not attached). (#249) (#308) (tidymodels/workflows#45)
+
+ * The function `req_pkgs()` is a user interface for determining the required packages. (#308)
+
 # parsnip 0.1.1

 ## New Features
diff --git a/R/aaa.R b/R/aaa.R
index 15372bd53..b333ca6af 100644
--- a/R/aaa.R
+++ b/R/aaa.R
@@ -66,7 +66,7 @@ utils::globalVariables(
   'lab', 'original', 'predicted_label', 'prediction', 'value', 'type',
   "neighbors", ".submodels", "has_submodel", "max_neighbor", "max_penalty",
   "max_terms", "max_tree", "model", "name", "num_terms", "penalty", "trees",
-  "sub_neighbors", ".pred_class", "x", "y")
+  "sub_neighbors", ".pred_class", "x", "y", "predictor_indicators")
  )

# nocov end
diff --git a/R/predict.R b/R/predict.R
index 3ff53685e..32c0c5e6d 100644
--- a/R/predict.R
+++ b/R/predict.R
@@ -117,6 +117,9 @@ predict.model_fit <- function(object, new_data, type = NULL, opts = list(), ...)
     return(NULL)
   }

+  check_installs(object$spec)
+  load_libs(object$spec, quiet = TRUE)
+
   other_args <- c("level", "std_error", "quantile") # "time" for survival probs later
   is_pred_arg <- names(the_dots) %in% other_args
   if (any(!is_pred_arg)) {
diff --git a/R/req_pkgs.R b/R/req_pkgs.R
new file mode 100644
index 000000000..25c5ce509
--- /dev/null
+++ b/R/req_pkgs.R
@@ -0,0 +1,53 @@
+#' Determine required packages for a model
+#'
+#' @param x A model specification or fit.
+#' @param ... Not used.
+#' @return A character string of package names (if any).
+#' @details
+#' For a model specification, the engine must be set.
+#'
+#' The list does not include the `parsnip` package.
+#' @examples
+#' should_fail <- try(req_pkgs(linear_reg()), silent = TRUE)
+#' should_fail
+#'
+#' linear_reg() %>%
+#'   set_engine("glmnet") %>%
+#'   req_pkgs()
+#'
+#' linear_reg() %>%
+#'   set_engine("lm") %>%
+#'   fit(mpg ~ ., data = mtcars) %>%
+#'   req_pkgs()
+#' @export
+req_pkgs <- function(x, ...) {
+  UseMethod("req_pkgs")
+}
+
+#' @export
+#' @rdname req_pkgs
+req_pkgs.model_spec <- function(x, ...) {
+  if (is.null(x$engine)) {
+    rlang::abort("Please set an engine.")
+  }
+  get_pkgs(x)
+}
+
+#' @export
+#' @rdname req_pkgs
+req_pkgs.model_fit <- function(x, ...) {
+  get_pkgs(x$spec)
+}
+
+get_pkgs <- function(x) {
+  cls <- class(x)[1]
+  pkgs <-
+    get_from_env(paste0(cls, "_pkgs")) %>%
+    dplyr::filter(engine == x$engine)
+  res <- pkgs$pkg[[1]]
+  if (length(res) == 0) {
+    res <- character(0)
+  }
+  res
+}
diff --git a/docs/dev/articles/articles/Submodels.html b/docs/dev/articles/articles/Submodels.html
index 280b44a2f..eb5a27c16 100644
--- a/docs/dev/articles/articles/Submodels.html
+++ b/docs/dev/articles/articles/Submodels.html
@@ -119,43 +119,42 @@

Evaluating Submodels with the Same Model Object

## see '?methods' for accessing help and source code

We’ll use the attrition data in rsample to illustrate:

library(tidymodels)
-
## ── Attaching packages ───────────────────────────────────────────────── tidymodels 0.1.0 ──
-
## ✓ broom     0.5.6           ✓ rsample   0.0.6      
-## ✓ dials     0.0.6           ✓ tibble    3.0.1      
-## ✓ dplyr     0.8.5           ✓ tune      0.1.0.9000 
-## ✓ infer     0.5.1           ✓ workflows 0.1.1      
-## ✓ purrr     0.3.4           ✓ yardstick 0.0.6      
-## ✓ recipes   0.1.12.9000
-
## Warning: package 'rsample' was built under R version 3.6.2
-
## ── Conflicts ──────────────────────────────────────────────────── tidymodels_conflicts() ──
+
## ── Attaching packages ───────────────────────────── tidymodels 0.1.0.9000 ──
+
## ✓ broom     0.5.6          ✓ rsample   0.0.6.9000
+## ✓ dials     0.0.6.9000     ✓ tibble    3.0.1.9000
+## ✓ dplyr     0.8.5          ✓ tune      0.1.0.9000
+## ✓ infer     0.5.1          ✓ workflows 0.1.1.9000
+## ✓ purrr     0.3.4          ✓ yardstick 0.0.6     
+## ✓ recipes   0.1.12
+
## ── Conflicts ───────────────────────────────────── tidymodels_conflicts() ──
 ## x purrr::discard() masks scales::discard()
 ## x dplyr::filter()  masks stats::filter()
 ## x dplyr::lag()     masks stats::lag()
 ## x dials::margin()  masks ggplot2::margin()
 ## x recipes::step()  masks stats::step()
-
data(attrition)
+
data(attrition)
 
 set.seed(4595)
 data_split <- initial_split(attrition, strata = "Attrition")
 attrition_train <- training(data_split)
 attrition_test  <- testing(data_split)

A boosted classification tree is one of the most low-maintenance approaches that we could take to these data:

-
# requires the xgboost package
+
# requires the xgboost package
 attrition_boost <-
   boost_tree(mode = "classification", trees = 100) %>%
   set_engine("C5.0")

Suppose that 10-fold cross-validation was being used to tune the model over the number of trees:

-
set.seed(616)
+
set.seed(616)
 folds <- vfold_cv(attrition_train)

The process would fit a model on 90% of the data and predict on the remaining 10%. Using rsample:

-
model_data <- analysis(folds$splits[[1]])
+
model_data <- analysis(folds$splits[[1]])
 pred_data  <- assessment(folds$splits[[1]])
 
 fold_1_model <-
   attrition_boost %>%
   fit_xy(x = model_data %>% dplyr::select(-Attrition), y = model_data$Attrition)

For multi_predict(), the same semantics of predict() are used but, for this model, there is an extra argument called trees. Candidate submodel values can be passed in with trees:

-
fold_1_pred <-
+
fold_1_pred <-
   multi_predict(
     fold_1_model,
     new_data = pred_data %>% dplyr::select(-Attrition),
@@ -178,7 +177,7 @@ 

Evaluating Submodels with the Same Model Object

## 10 <tibble [100 × 3]>
## # … with 101 more rows

The result is a tibble that has as many rows as the data being predicted (n = 111). The .pred column contains a list of tibbles, each holding the predictions across the different numbers of trees:

-
fold_1_pred$.pred[[1]]
+
fold_1_pred$.pred[[1]]
## # A tibble: 100 x 3
 ##    trees .pred_No .pred_Yes
 ##    <int>    <dbl>     <dbl>
@@ -194,7 +193,7 @@ 

Evaluating Submodels with the Same Model Object

## 10    10    0.724     0.276
## # … with 90 more rows

To get this into a format that is more usable, we can use tidyr::unnest() but we first add row numbers so that we can track the predictions by test sample as well as the actual classes:

-
fold_1_df <-
+
fold_1_df <-
   fold_1_pred %>%
   bind_cols(pred_data %>% dplyr::select(Attrition)) %>%
   add_rowindex() %>%
@@ -215,7 +214,7 @@ 

Evaluating Submodels with the Same Model Object

## 10    10    0.724     0.276 Yes          1
## # … with 11,090 more rows

For two samples, what do these look like over trees?

-
fold_1_df %>%
+
fold_1_df %>%
   dplyr::filter(.row %in% c(1, 88)) %>%
   ggplot(aes(x = trees, y = .pred_No, col = Attrition, group = .row)) +
   geom_step() +
@@ -223,7 +222,7 @@ 

Evaluating Submodels with the Same Model Object

theme(legend.position = "top")

What does performance look like over trees (using the area under the ROC curve)?

-
fold_1_df %>%
+
fold_1_df %>%
   group_by(trees) %>%
   roc_auc(truth = Attrition, .pred_No) %>%
   ggplot(aes(x = trees, y = .estimate)) +
diff --git a/docs/dev/news/index.html b/docs/dev/news/index.html
index 4c4e988db..73319664f 100644
--- a/docs/dev/news/index.html
+++ b/docs/dev/news/index.html
@@ -158,6 +158,8 @@ 

  • A new main argument was added to boost_tree() called stop_iter for early stopping. The xgb_train() function gained arguments for early stopping and a percentage of data to leave out for a validation set.

  • If fit() is used and the underlying model uses a formula, the actual formula is passed to the model (instead of a placeholder). This makes the model call better.

  • A function named repair_call() was added. This can help change the underlying model's call object to better reflect what would have been obtained if the model function had been used directly (instead of via parsnip). This is only useful when the user chooses a formula interface and the model uses a formula interface. It will also be of limited use when a recipe is used to construct the feature set in workflows or tune.

  • +
  • The predict() function now checks to see if required modeling packages are installed. The packages are loaded (but not attached). (#249) (#308) (tidymodels/workflows#45)

  • +
  • The function req_pkgs() is a user interface for determining the required packages; a short example is shown just below. (#308)
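  A minimal illustration of the new helper, mirroring the examples added elsewhere in this changeset (the printed result comes from those examples):

  library(parsnip)

  # Once an engine is chosen, the packages needed at fit/predict time can be listed
  linear_reg() %>%
    set_engine("glmnet") %>%
    req_pkgs()
  #> [1] "glmnet"

  # predict() now loads (but does not attach) these packages before generating predictions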

  • diff --git a/docs/dev/pkgdown.css b/docs/dev/pkgdown.css index 1273238dd..c01e5923b 100644 --- a/docs/dev/pkgdown.css +++ b/docs/dev/pkgdown.css @@ -244,14 +244,14 @@ nav[data-toggle='toc'] .nav .nav > .active:focus > a { .ref-index th {font-weight: normal;} -.ref-index td {vertical-align: top; min-width: 100px} +.ref-index td {vertical-align: top;} .ref-index .icon {width: 40px;} .ref-index .alias {width: 40%;} .ref-index-icons .alias {width: calc(40% - 40px);} .ref-index .title {width: 60%;} .ref-arguments th {text-align: right; padding-right: 10px;} -.ref-arguments th, .ref-arguments td {vertical-align: top; min-width: 100px} +.ref-arguments th, .ref-arguments td {vertical-align: top;} .ref-arguments .name {width: 20%;} .ref-arguments .desc {width: 80%;} diff --git a/docs/dev/pkgdown.yml b/docs/dev/pkgdown.yml index e6716b06c..40bc87c30 100644 --- a/docs/dev/pkgdown.yml +++ b/docs/dev/pkgdown.yml @@ -1,6 +1,6 @@ pandoc: 2.9.2.1 -pkgdown: 1.5.1.9000 -pkgdown_sha: ac78596154e403df5f4e683f2185d88225a0fea6 +pkgdown: 1.5.1 +pkgdown_sha: ~ articles: Classification: articles/Classification.html Models: articles/Models.html @@ -8,7 +8,7 @@ articles: Scratch: articles/Scratch.html Submodels: articles/Submodels.html parsnip_Intro: parsnip_Intro.html -last_built: 2020-05-27T00:06Z +last_built: 2020-05-30T23:54Z urls: reference: https://parsnip.tidymodels.org/reference article: https://parsnip.tidymodels.org/articles diff --git a/docs/dev/reference/boost_tree.html b/docs/dev/reference/boost_tree.html index 8dd6013bf..254f1d0ed 100644 --- a/docs/dev/reference/boost_tree.html +++ b/docs/dev/reference/boost_tree.html @@ -325,34 +325,37 @@

    boost_tree() %>%
       set_engine("xgboost") %>%
       set_mode("regression") %>%
    -  translate()

    ## Boosted Tree Model Specification (regression)
    -## 
    -## Computational engine: xgboost 
    -## 
    -## Model fit template:
    -## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1, 
    -##     verbose = 0)

    boost_tree() %>%
    +  translate()

    ## Boosted Tree Model Specification (regression)
    +## 
    +## Computational engine: xgboost 
    +## 
    +## Model fit template:
    +## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1, 
    +##     verbose = 0)
    +

    boost_tree() %>%
       set_engine("xgboost") %>%
       set_mode("classification") %>%
    -  translate()

    ## Boosted Tree Model Specification (classification)
    -## 
    -## Computational engine: xgboost 
    -## 
    -## Model fit template:
    -## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1, 
    -##     verbose = 0)
    + translate()

    ## Boosted Tree Model Specification (classification)
    +## 
    +## Computational engine: xgboost 
    +## 
    +## Model fit template:
    +## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1, 
    +##     verbose = 0)
    +

    C5.0

    boost_tree() %>%
       set_engine("C5.0") %>%
       set_mode("classification") %>%
    -  translate()

    ## Boosted Tree Model Specification (classification)
    -## 
    -## Computational engine: C5.0 
    -## 
    -## Model fit template:
    -## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg())
    + translate()

    ## Boosted Tree Model Specification (classification)
    +## 
    +## Computational engine: C5.0 
    +## 
    +## Model fit template:
    +## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg())
    +

    Note that C50::C5.0() does not require factor predictors to be converted to indicator variables.

    @@ -361,22 +364,24 @@

    spark

    boost_tree() %>%
       set_engine("spark") %>%
       set_mode("regression") %>%
    -  translate()

    ## Boosted Tree Model Specification (regression)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(), 
    -##     type = "regression", seed = sample.int(10^5, 1))

    boost_tree() %>%
    +  translate()

    ## Boosted Tree Model Specification (regression)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(), 
    +##     type = "regression", seed = sample.int(10^5, 1))
    +

    boost_tree() %>%
       set_engine("spark") %>%
       set_mode("classification") %>%
    -  translate()

    ## Boosted Tree Model Specification (classification)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(), 
    -##     type = "classification", seed = sample.int(10^5, 1))
    + translate()

    ## Boosted Tree Model Specification (classification)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(), 
    +##     type = "classification", seed = sample.int(10^5, 1))
    +

    Parameter translations

    diff --git a/docs/dev/reference/decision_tree.html b/docs/dev/reference/decision_tree.html index fe0e95745..ebfa8600f 100644 --- a/docs/dev/reference/decision_tree.html +++ b/docs/dev/reference/decision_tree.html @@ -270,20 +270,22 @@

    decision_tree() %>%
       set_engine("rpart") %>%
       set_mode("regression") %>%
    -  translate()

    ## Decision Tree Model Specification (regression)
    -## 
    -## Computational engine: rpart 
    -## 
    -## Model fit template:
    -## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())

    decision_tree() %>%
    +  translate()

    ## Decision Tree Model Specification (regression)
    +## 
    +## Computational engine: rpart 
    +## 
    +## Model fit template:
    +## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    +

    decision_tree() %>%
       set_engine("rpart") %>%
       set_mode("classification") %>%
    -  translate()

    ## Decision Tree Model Specification (classification)
    -## 
    -## Computational engine: rpart 
    -## 
    -## Model fit template:
    -## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    + translate()

    ## Decision Tree Model Specification (classification)
    +## 
    +## Computational engine: rpart 
    +## 
    +## Model fit template:
    +## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    +

    Note that rpart::rpart() does not require factor predictors to be converted to indicator variables.

    @@ -292,13 +294,14 @@

    C5.0

    decision_tree() %>%
       set_engine("C5.0") %>%
       set_mode("classification") %>%
    -  translate()

    ## Decision Tree Model Specification (classification)
    -## 
    -## Computational engine: C5.0 
    -## 
    -## Model fit template:
    -## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    -##     trials = 1)
    + translate()

    ## Decision Tree Model Specification (classification)
    +## 
    +## Computational engine: C5.0 
    +## 
    +## Model fit template:
    +## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    +##     trials = 1)
    +

    Note that C50::C5.0() does not require factor predictors to be converted to indicator variables.

    @@ -307,22 +310,24 @@

    spark

    decision_tree() %>%
       set_engine("spark") %>%
       set_mode("regression") %>%
    -  translate()

    ## Decision Tree Model Specification (regression)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(), 
    -##     seed = sample.int(10^5, 1))

    decision_tree() %>%
    +  translate()

    ## Decision Tree Model Specification (regression)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(), 
    +##     seed = sample.int(10^5, 1))
    +

    decision_tree() %>%
       set_engine("spark") %>%
       set_mode("classification") %>%
    -  translate()

    ## Decision Tree Model Specification (classification)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(), 
    -##     seed = sample.int(10^5, 1))
    + translate()

    ## Decision Tree Model Specification (classification)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(), 
    +##     seed = sample.int(10^5, 1))
    +

    Parameter translations

    diff --git a/docs/dev/reference/descriptors.html b/docs/dev/reference/descriptors.html index faa7abdaf..6db6fa88f 100644 --- a/docs/dev/reference/descriptors.html +++ b/docs/dev/reference/descriptors.html @@ -195,9 +195,9 @@

    Details
     .obs() = 150
     .lvls() = NA (no factor outcome)
     .facts() = 1 (the Species predictor)
    - .y() = <vector> (Sepal.Width as a vector)
    - .x() = <data.frame> (The other 4 columns as a data frame)
    - .dat() = <data.frame> (The full data set)
    + .y() = &lt;vector&gt; (Sepal.Width as a vector)
    + .x() = &lt;data.frame&gt; (The other 4 columns as a data frame)
    + .dat() = &lt;data.frame&gt; (The full data set)

    If the formula Species ~ . were used:

    @@ -206,9 +206,9 @@ 

    Details
     .obs() = 150
     .lvls() = c(setosa = 50, versicolor = 50, virginica = 50)
     .facts() = 0
    - .y() = <vector> (Species as a vector)
    - .x() = <data.frame> (The other 4 columns as a data frame)
    - .dat() = <data.frame> (The full data set)
    + .y() = &lt;vector&gt; (Species as a vector)
    + .x() = &lt;data.frame&gt; (The other 4 columns as a data frame)
    + .dat() = &lt;data.frame&gt; (The full data set)

    To use these in a model fit, pass them to a model specification. diff --git a/docs/dev/reference/fit.html b/docs/dev/reference/fit.html index 52522745e..309cc94d5 100644 --- a/docs/dev/reference/fit.html +++ b/docs/dev/reference/fit.html @@ -261,7 +261,7 @@

    Examp using_formula
    #> parsnip model object #> -#> Fit time: 26ms +#> Fit time: 25ms #> #> Call: stats::glm(formula = Class ~ funded_amnt + int_rate, family = stats::binomial, #> data = data) @@ -274,7 +274,7 @@

    Examp #> Null Deviance: 4055 #> Residual Deviance: 3698 AIC: 3704

    using_xy
    #> parsnip model object #> -#> Fit time: 23ms +#> Fit time: 17ms #> #> Call: stats::glm(formula = ..y ~ ., family = stats::binomial, data = data) #> diff --git a/docs/dev/reference/get_model_env.html b/docs/dev/reference/get_model_env.html index e1472fa50..1d114e7ce 100644 --- a/docs/dev/reference/get_model_env.html +++ b/docs/dev/reference/get_model_env.html @@ -182,46 +182,53 @@

    R

    Examples

    # Access the model data: current_code <- get_model_env() -ls(envir = current_code)
    #> [1] "boost_tree" "boost_tree_args" -#> [3] "boost_tree_fit" "boost_tree_modes" -#> [5] "boost_tree_pkgs" "boost_tree_predict" -#> [7] "decision_tree" "decision_tree_args" -#> [9] "decision_tree_fit" "decision_tree_modes" -#> [11] "decision_tree_pkgs" "decision_tree_predict" -#> [13] "linear_reg" "linear_reg_args" -#> [15] "linear_reg_fit" "linear_reg_modes" -#> [17] "linear_reg_pkgs" "linear_reg_predict" -#> [19] "logistic_reg" "logistic_reg_args" -#> [21] "logistic_reg_fit" "logistic_reg_modes" -#> [23] "logistic_reg_pkgs" "logistic_reg_predict" -#> [25] "mars" "mars_args" -#> [27] "mars_fit" "mars_modes" -#> [29] "mars_pkgs" "mars_predict" -#> [31] "mlp" "mlp_args" -#> [33] "mlp_fit" "mlp_modes" -#> [35] "mlp_pkgs" "mlp_predict" -#> [37] "models" "modes" -#> [39] "multinom_reg" "multinom_reg_args" -#> [41] "multinom_reg_fit" "multinom_reg_modes" -#> [43] "multinom_reg_pkgs" "multinom_reg_predict" -#> [45] "nearest_neighbor" "nearest_neighbor_args" -#> [47] "nearest_neighbor_fit" "nearest_neighbor_modes" -#> [49] "nearest_neighbor_pkgs" "nearest_neighbor_predict" -#> [51] "null_model" "null_model_args" -#> [53] "null_model_fit" "null_model_modes" -#> [55] "null_model_pkgs" "null_model_predict" -#> [57] "rand_forest" "rand_forest_args" -#> [59] "rand_forest_fit" "rand_forest_modes" -#> [61] "rand_forest_pkgs" "rand_forest_predict" -#> [63] "surv_reg" "surv_reg_args" -#> [65] "surv_reg_fit" "surv_reg_modes" -#> [67] "surv_reg_pkgs" "surv_reg_predict" -#> [69] "svm_poly" "svm_poly_args" -#> [71] "svm_poly_fit" "svm_poly_modes" -#> [73] "svm_poly_pkgs" "svm_poly_predict" -#> [75] "svm_rbf" "svm_rbf_args" -#> [77] "svm_rbf_fit" "svm_rbf_modes" -#> [79] "svm_rbf_pkgs" "svm_rbf_predict"
    +ls(envir = current_code)
    #> [1] "boost_tree" "boost_tree_args" +#> [3] "boost_tree_encoding" "boost_tree_fit" +#> [5] "boost_tree_modes" "boost_tree_pkgs" +#> [7] "boost_tree_predict" "decision_tree" +#> [9] "decision_tree_args" "decision_tree_encoding" +#> [11] "decision_tree_fit" "decision_tree_modes" +#> [13] "decision_tree_pkgs" "decision_tree_predict" +#> [15] "linear_reg" "linear_reg_args" +#> [17] "linear_reg_encoding" "linear_reg_fit" +#> [19] "linear_reg_modes" "linear_reg_pkgs" +#> [21] "linear_reg_predict" "logistic_reg" +#> [23] "logistic_reg_args" "logistic_reg_encoding" +#> [25] "logistic_reg_fit" "logistic_reg_modes" +#> [27] "logistic_reg_pkgs" "logistic_reg_predict" +#> [29] "mars" "mars_args" +#> [31] "mars_encoding" "mars_fit" +#> [33] "mars_modes" "mars_pkgs" +#> [35] "mars_predict" "mlp" +#> [37] "mlp_args" "mlp_encoding" +#> [39] "mlp_fit" "mlp_modes" +#> [41] "mlp_pkgs" "mlp_predict" +#> [43] "models" "modes" +#> [45] "multinom_reg" "multinom_reg_args" +#> [47] "multinom_reg_encoding" "multinom_reg_fit" +#> [49] "multinom_reg_modes" "multinom_reg_pkgs" +#> [51] "multinom_reg_predict" "nearest_neighbor" +#> [53] "nearest_neighbor_args" "nearest_neighbor_encoding" +#> [55] "nearest_neighbor_fit" "nearest_neighbor_modes" +#> [57] "nearest_neighbor_pkgs" "nearest_neighbor_predict" +#> [59] "null_model" "null_model_args" +#> [61] "null_model_encoding" "null_model_fit" +#> [63] "null_model_modes" "null_model_pkgs" +#> [65] "null_model_predict" "rand_forest" +#> [67] "rand_forest_args" "rand_forest_encoding" +#> [69] "rand_forest_fit" "rand_forest_modes" +#> [71] "rand_forest_pkgs" "rand_forest_predict" +#> [73] "surv_reg" "surv_reg_args" +#> [75] "surv_reg_encoding" "surv_reg_fit" +#> [77] "surv_reg_modes" "surv_reg_pkgs" +#> [79] "surv_reg_predict" "svm_poly" +#> [81] "svm_poly_args" "svm_poly_encoding" +#> [83] "svm_poly_fit" "svm_poly_modes" +#> [85] "svm_poly_pkgs" "svm_poly_predict" +#> [87] "svm_rbf" "svm_rbf_args" +#> [89] "svm_rbf_encoding" "svm_rbf_fit" +#> [91] "svm_rbf_modes" "svm_rbf_pkgs" +#> [93] "svm_rbf_predict"

    ## Linear Regression Model Specification (regression)
    -## 
    -## Computational engine: lm 
    -## 
    -## Model fit template:
    -## stats::lm(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    + translate()

    ## Linear Regression Model Specification (regression)
    +## 
    +## Computational engine: lm 
    +## 
    +## Model fit template:
    +## stats::lm(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    +

    glmnet

    linear_reg() %>%
       set_engine("glmnet") %>%
       set_mode("regression") %>%
    -  translate()

    ## Linear Regression Model Specification (regression)
    -## 
    -## Computational engine: glmnet 
    -## 
    -## Model fit template:
    -## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    -##     family = "gaussian")
    + translate()

    ## Linear Regression Model Specification (regression)
    +## 
    +## Computational engine: glmnet 
    +## 
    +## Model fit template:
    +## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    +##     family = "gaussian")
    +

    For glmnet models, the full regularization path is always fit regardless of the value given to penalty. Also, there is the option to @@ -294,13 +296,14 @@
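    A hedged sketch of what the single regularization path enables (data set and penalty values are illustrative; multi_predict() follows the pattern shown in the Submodels article elsewhere in this changeset):

    fitted_glmnet <-
      linear_reg(penalty = 0.01) %>%
      set_engine("glmnet") %>%
      fit(mpg ~ ., data = mtcars)

    # One fit; predictions requested at several penalty values
    multi_predict(fitted_glmnet, new_data = mtcars[1:5, ], penalty = c(0.001, 0.01, 0.1))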

    stan

    linear_reg() %>%
       set_engine("stan") %>%
       set_mode("regression") %>%
    -  translate()

    ## Linear Regression Model Specification (regression)
    -## 
    -## Computational engine: stan 
    -## 
    -## Model fit template:
    -## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(), 
    -##     weights = missing_arg(), family = stats::gaussian, refresh = 0)
    + translate()

    ## Linear Regression Model Specification (regression)
    +## 
    +## Computational engine: stan 
    +## 
    +## Model fit template:
    +## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(), 
    +##     weights = missing_arg(), family = stats::gaussian, refresh = 0)
    +

    Note that the refresh default prevents logging of the estimation process. Changing this value via set_engine() will show the logs, as sketched below.
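    For example (a sketch; the refresh value is illustrative and is passed straight through to rstanarm by set_engine()):

    linear_reg() %>%
      set_engine("stan", refresh = 50) %>%
      set_mode("regression") %>%
      translate()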

    @@ -314,26 +317,28 @@

    spark

    linear_reg() %>%
       set_engine("spark") %>%
       set_mode("regression") %>%
    -  translate()

    ## Linear Regression Model Specification (regression)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_linear_regression(x = missing_arg(), formula = missing_arg(), 
    -##     weight_col = missing_arg())
    + translate()

    ## Linear Regression Model Specification (regression)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_linear_regression(x = missing_arg(), formula = missing_arg(), 
    +##     weight_col = missing_arg())
    +

    keras

    linear_reg() %>%
       set_engine("keras") %>%
       set_mode("regression") %>%
    -  translate()

    ## Linear Regression Model Specification (regression)
    -## 
    -## Computational engine: keras 
    -## 
    -## Model fit template:
    -## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    -##     act = "linear")
    + translate()

    ## Linear Regression Model Specification (regression)
    +## 
    +## Computational engine: keras 
    +## 
    +## Model fit template:
    +## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    +##     act = "linear")
    +

    Parameter translations

    diff --git a/docs/dev/reference/logistic_reg.html b/docs/dev/reference/logistic_reg.html index 4bb0a12d7..972851e29 100644 --- a/docs/dev/reference/logistic_reg.html +++ b/docs/dev/reference/logistic_reg.html @@ -258,26 +258,28 @@

    logistic_reg() %>%
       set_engine("glm") %>%
       set_mode("classification") %>%
    -  translate()

    ## Logistic Regression Model Specification (classification)
    -## 
    -## Computational engine: glm 
    -## 
    -## Model fit template:
    -## stats::glm(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    -##     family = stats::binomial)
    + translate()

    ## Logistic Regression Model Specification (classification)
    +## 
    +## Computational engine: glm 
    +## 
    +## Model fit template:
    +## stats::glm(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    +##     family = stats::binomial)
    +

    glmnet

    logistic_reg() %>%
       set_engine("glmnet") %>%
       set_mode("classification") %>%
    -  translate()

    ## Logistic Regression Model Specification (classification)
    -## 
    -## Computational engine: glmnet 
    -## 
    -## Model fit template:
    -## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    -##     family = "binomial")
    + translate()

    ## Logistic Regression Model Specification (classification)
    +## 
    +## Computational engine: glmnet 
    +## 
    +## Model fit template:
    +## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    +##     family = "binomial")
    +

    For glmnet models, the full regularization path is always fit regardless of the value given to penalty. Also, there is the option to @@ -293,13 +295,14 @@

    stan

    logistic_reg() %>%
       set_engine("stan") %>%
       set_mode("classification") %>%
    -  translate()

    ## Logistic Regression Model Specification (classification)
    -## 
    -## Computational engine: stan 
    -## 
    -## Model fit template:
    -## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(), 
    -##     weights = missing_arg(), family = stats::binomial, refresh = 0)
    + translate()

    ## Logistic Regression Model Specification (classification)
    +## 
    +## Computational engine: stan 
    +## 
    +## Model fit template:
    +## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(), 
    +##     weights = missing_arg(), family = stats::binomial, refresh = 0)
    +

    Note that the refresh default prevents logging of the estimation process. Changing this value via set_engine() will show the logs.

    @@ -313,26 +316,28 @@

    spark

    logistic_reg() %>%
       set_engine("spark") %>%
       set_mode("classification") %>%
    -  translate()

    ## Logistic Regression Model Specification (classification)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(), 
    -##     weight_col = missing_arg(), family = "binomial")
    + translate()

    ## Logistic Regression Model Specification (classification)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(), 
    +##     weight_col = missing_arg(), family = "binomial")
    +

    keras

    logistic_reg() %>%
       set_engine("keras") %>%
       set_mode("classification") %>%
    -  translate()

    ## Logistic Regression Model Specification (classification)
    -## 
    -## Computational engine: keras 
    -## 
    -## Model fit template:
    -## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    -##     act = "linear")
    + translate()

    ## Logistic Regression Model Specification (classification)
    +## 
    +## Computational engine: keras 
    +## 
    +## Model fit template:
    +## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    +##     act = "linear")
    +

    Parameter translations

    diff --git a/docs/dev/reference/mars.html b/docs/dev/reference/mars.html index daa4827c9..b1861ad95 100644 --- a/docs/dev/reference/mars.html +++ b/docs/dev/reference/mars.html @@ -251,25 +251,27 @@

    mars() %>%
       set_engine("earth") %>%
       set_mode("regression") %>%
    -  translate()

    ## MARS Model Specification (regression)
    -## 
    -## Computational engine: earth 
    -## 
    -## Model fit template:
    -## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    -##     keepxy = TRUE)

    mars() %>%
    +  translate()

    ## MARS Model Specification (regression)
    +## 
    +## Computational engine: earth 
    +## 
    +## Model fit template:
    +## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    +##     keepxy = TRUE)
    +

    mars() %>%
       set_engine("earth") %>%
       set_mode("classification") %>%
    -  translate()

    ## MARS Model Specification (classification)
    -## 
    -## Engine-Specific Arguments:
    -##   glm = list(family = stats::binomial)
    -## 
    -## Computational engine: earth 
    -## 
    -## Model fit template:
    -## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    -##     glm = list(family = stats::binomial), keepxy = TRUE)
    + translate()

    ## MARS Model Specification (classification)
    +## 
    +## Engine-Specific Arguments:
    +##   glm = list(family = stats::binomial)
    +## 
    +## Computational engine: earth 
    +## 
    +## Model fit template:
    +## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    +##     glm = list(family = stats::binomial), keepxy = TRUE)
    +

    Note that, when the model is fit, the earth package only has its namespace loaded. However, if multi_predict is used, the package is diff --git a/docs/dev/reference/mlp.html b/docs/dev/reference/mlp.html index a84fd2ae5..88629696a 100644 --- a/docs/dev/reference/mlp.html +++ b/docs/dev/reference/mlp.html @@ -279,20 +279,22 @@

    mlp() %>%
       set_engine("keras") %>%
       set_mode("regression") %>%
    -  translate()

    ## Single Layer Neural Network Specification (regression)
    -## 
    -## Computational engine: keras 
    -## 
    -## Model fit template:
    -## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())

    mlp() %>%
    +  translate()

    ## Single Layer Neural Network Specification (regression)
    +## 
    +## Computational engine: keras 
    +## 
    +## Model fit template:
    +## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())
    +

    mlp() %>%
       set_engine("keras") %>%
       set_mode("classification") %>%
    -  translate()

    ## Single Layer Neural Network Specification (classification)
    -## 
    -## Computational engine: keras 
    -## 
    -## Model fit template:
    -## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())
    + translate()

    ## Single Layer Neural Network Specification (classification)
    +## 
    +## Computational engine: keras 
    +## 
    +## Model fit template:
    +## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())
    +

    An error is thrown if both penalty and dropout are specified for keras models.

    @@ -301,28 +303,30 @@

    nnet

    mlp() %>%
       set_engine("nnet") %>%
       set_mode("regression") %>%
    -  translate()

    ## Single Layer Neural Network Specification (regression)
    -## 
    -## Main Arguments:
    -##   hidden_units = 5
    -## 
    -## Computational engine: nnet 
    -## 
    -## Model fit template:
    -## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    -##     size = 5, trace = FALSE, linout = TRUE)

    mlp() %>%
    +  translate()

    ## Single Layer Neural Network Specification (regression)
    +## 
    +## Main Arguments:
    +##   hidden_units = 5
    +## 
    +## Computational engine: nnet 
    +## 
    +## Model fit template:
    +## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    +##     size = 5, trace = FALSE, linout = TRUE)
    +

    mlp() %>%
       set_engine("nnet") %>%
       set_mode("classification") %>%
    -  translate()

    ## Single Layer Neural Network Specification (classification)
    -## 
    -## Main Arguments:
    -##   hidden_units = 5
    -## 
    -## Computational engine: nnet 
    -## 
    -## Model fit template:
    -## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    -##     size = 5, trace = FALSE, linout = FALSE)
    + translate()

    ## Single Layer Neural Network Specification (classification)
    +## 
    +## Main Arguments:
    +##   hidden_units = 5
    +## 
    +## Computational engine: nnet 
    +## 
    +## Model fit template:
    +## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(), 
    +##     size = 5, trace = FALSE, linout = FALSE)
    +

    Parameter translations

    diff --git a/docs/dev/reference/model_spec.html b/docs/dev/reference/model_spec.html index 53794323f..310fa17a2 100644 --- a/docs/dev/reference/model_spec.html +++ b/docs/dev/reference/model_spec.html @@ -200,7 +200,7 @@

    multinom_reg() %>%
       set_engine("glmnet") %>%
       set_mode("classification") %>%
    -  translate()

    ## Multinomial Regression Model Specification (classification)
    -## 
    -## Computational engine: glmnet 
    -## 
    -## Model fit template:
    -## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    -##     family = "multinomial")
    + translate()

    ## Multinomial Regression Model Specification (classification)
    +## 
    +## Computational engine: glmnet 
    +## 
    +## Model fit template:
    +## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(), 
    +##     family = "multinomial")
    +

    For glmnet models, the full regularization path is always fit regardless of the value given to penalty. Also, there is the option to @@ -279,39 +280,42 @@

    nnet

    multinom_reg() %>%
       set_engine("nnet") %>%
       set_mode("classification") %>%
    -  translate()

    ## Multinomial Regression Model Specification (classification)
    -## 
    -## Computational engine: nnet 
    -## 
    -## Model fit template:
    -## nnet::multinom(formula = missing_arg(), data = missing_arg(), 
    -##     weights = missing_arg(), trace = FALSE)
    + translate()

    ## Multinomial Regression Model Specification (classification)
    +## 
    +## Computational engine: nnet 
    +## 
    +## Model fit template:
    +## nnet::multinom(formula = missing_arg(), data = missing_arg(), 
    +##     weights = missing_arg(), trace = FALSE)
    +

    spark

    multinom_reg() %>%
       set_engine("spark") %>%
       set_mode("classification") %>%
    -  translate()

    ## Multinomial Regression Model Specification (classification)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(), 
    -##     weight_col = missing_arg(), family = "multinomial")
    + translate()

    ## Multinomial Regression Model Specification (classification)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(), 
    +##     weight_col = missing_arg(), family = "multinomial")
    +

    keras

    multinom_reg() %>%
       set_engine("keras") %>%
       set_mode("classification") %>%
    -  translate()

    ## Multinomial Regression Model Specification (classification)
    -## 
    -## Computational engine: keras 
    -## 
    -## Model fit template:
    -## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    -##     act = "linear")
    + translate()

    ## Multinomial Regression Model Specification (classification)
    +## 
    +## Computational engine: keras 
    +## 
    +## Model fit template:
    +## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1, 
    +##     act = "linear")
    +

    Parameter translations

    diff --git a/docs/dev/reference/nearest_neighbor.html b/docs/dev/reference/nearest_neighbor.html index 73e82ad99..f128012f0 100644 --- a/docs/dev/reference/nearest_neighbor.html +++ b/docs/dev/reference/nearest_neighbor.html @@ -225,22 +225,24 @@

    nearest_neighbor() %>%
       set_engine("kknn") %>%
       set_mode("regression") %>%
    -  translate()

    ## K-Nearest Neighbor Model Specification (regression)
    -## 
    -## Computational engine: kknn 
    -## 
    -## Model fit template:
    -## kknn::train.kknn(formula = missing_arg(), data = missing_arg(), 
    -##     ks = 5)

    nearest_neighbor() %>%
    +  translate()

    ## K-Nearest Neighbor Model Specification (regression)
    +## 
    +## Computational engine: kknn 
    +## 
    +## Model fit template:
    +## kknn::train.kknn(formula = missing_arg(), data = missing_arg(), 
    +##     ks = 5)
    +

    nearest_neighbor() %>%
       set_engine("kknn") %>%
       set_mode("classification") %>%
    -  translate()

    ## K-Nearest Neighbor Model Specification (classification)
    -## 
    -## Computational engine: kknn 
    -## 
    -## Model fit template:
    -## kknn::train.kknn(formula = missing_arg(), data = missing_arg(), 
    -##     ks = 5)
    + translate()

    ## K-Nearest Neighbor Model Specification (classification)
    +## 
    +## Computational engine: kknn 
    +## 
    +## Model fit template:
    +## kknn::train.kknn(formula = missing_arg(), data = missing_arg(), 
    +##     ks = 5)
    +

    For kknn, the underlying modeling function used is a restricted version of train.kknn() and not kknn(). It is set up in this way so diff --git a/docs/dev/reference/null_model.html b/docs/dev/reference/null_model.html index f5890685b..5fed78193 100644 --- a/docs/dev/reference/null_model.html +++ b/docs/dev/reference/null_model.html @@ -174,20 +174,22 @@

    null_model() %>%
       set_engine("parsnip") %>%
       set_mode("regression") %>%
    -  translate()

    ## Model Specification (regression)
    -## 
    -## Computational engine: parsnip 
    -## 
    -## Model fit template:
    -## nullmodel(x = missing_arg(), y = missing_arg())

    null_model() %>%
    +  translate()

    ## Model Specification (regression)
    +## 
    +## Computational engine: parsnip 
    +## 
    +## Model fit template:
    +## nullmodel(x = missing_arg(), y = missing_arg())
    +

    null_model() %>%
       set_engine("parsnip") %>%
       set_mode("classification") %>%
    -  translate()

    ## Model Specification (classification)
    -## 
    -## Computational engine: parsnip 
    -## 
    -## Model fit template:
    -## nullmodel(x = missing_arg(), y = missing_arg())
    + translate()

    ## Model Specification (classification)
    +## 
    +## Computational engine: parsnip 
    +## 
    +## Model fit template:
    +## nullmodel(x = missing_arg(), y = missing_arg())
    +

    See also

    diff --git a/docs/dev/reference/rand_forest.html b/docs/dev/reference/rand_forest.html index e9fbe86c2..99e577301 100644 --- a/docs/dev/reference/rand_forest.html +++ b/docs/dev/reference/rand_forest.html @@ -259,24 +259,26 @@

    rand_forest() %>%
       set_engine("ranger") %>%
       set_mode("regression") %>%
    -  translate()

    ## Random Forest Model Specification (regression)
    -## 
    -## Computational engine: ranger 
    -## 
    -## Model fit template:
    -## ranger::ranger(formula = missing_arg(), data = missing_arg(), 
    -##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE, 
    -##     seed = sample.int(10^5, 1))

    rand_forest() %>%
    +  translate()

    ## Random Forest Model Specification (regression)
    +## 
    +## Computational engine: ranger 
    +## 
    +## Model fit template:
    +## ranger::ranger(formula = missing_arg(), data = missing_arg(), 
    +##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE, 
    +##     seed = sample.int(10^5, 1))
    +

    rand_forest() %>%
       set_engine("ranger") %>%
       set_mode("classification") %>%
    -  translate()

    ## Random Forest Model Specification (classification)
    -## 
    -## Computational engine: ranger 
    -## 
    -## Model fit template:
    -## ranger::ranger(formula = missing_arg(), data = missing_arg(), 
    -##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE, 
    -##     seed = sample.int(10^5, 1), probability = TRUE)
    + translate()

    ## Random Forest Model Specification (classification)
    +## 
    +## Computational engine: ranger 
    +## 
    +## Model fit template:
    +## ranger::ranger(formula = missing_arg(), data = missing_arg(), 
    +##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE, 
    +##     seed = sample.int(10^5, 1), probability = TRUE)
    +

    Note that ranger::ranger() does not require factor predictors to be converted to indicator variables.
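    A brief sketch of what that means in practice (data set choice is illustrative; Species can stay a factor and no indicator columns are created):

    rand_forest(mode = "regression") %>%
      set_engine("ranger") %>%
      fit(Sepal.Length ~ ., data = iris)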

    @@ -289,20 +291,22 @@

    randomForest

    rand_forest() %>%
       set_engine("randomForest") %>%
       set_mode("regression") %>%
    -  translate()

    ## Random Forest Model Specification (regression)
    -## 
    -## Computational engine: randomForest 
    -## 
    -## Model fit template:
    -## randomForest::randomForest(x = missing_arg(), y = missing_arg())

    rand_forest() %>%
    +  translate()

    ## Random Forest Model Specification (regression)
    +## 
    +## Computational engine: randomForest 
    +## 
    +## Model fit template:
    +## randomForest::randomForest(x = missing_arg(), y = missing_arg())
    +

    rand_forest() %>%
       set_engine("randomForest") %>%
       set_mode("classification") %>%
    -  translate()

    ## Random Forest Model Specification (classification)
    -## 
    -## Computational engine: randomForest 
    -## 
    -## Model fit template:
    -## randomForest::randomForest(x = missing_arg(), y = missing_arg())
    + translate()

    ## Random Forest Model Specification (classification)
    +## 
    +## Computational engine: randomForest 
    +## 
    +## Model fit template:
    +## randomForest::randomForest(x = missing_arg(), y = missing_arg())
    +

    Note that randomForest::randomForest() does @@ -312,22 +316,24 @@

    spark

    rand_forest() %>%
       set_engine("spark") %>%
       set_mode("regression") %>%
    -  translate()

    ## Random Forest Model Specification (regression)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(), 
    -##     type = "regression", seed = sample.int(10^5, 1))

    rand_forest() %>%
    +  translate()

    ## Random Forest Model Specification (regression)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(), 
    +##     type = "regression", seed = sample.int(10^5, 1))
    +

    rand_forest() %>%
       set_engine("spark") %>%
       set_mode("classification") %>%
    -  translate()

    ## Random Forest Model Specification (classification)
    -## 
    -## Computational engine: spark 
    -## 
    -## Model fit template:
    -## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(), 
    -##     type = "classification", seed = sample.int(10^5, 1))
    + translate()

    ## Random Forest Model Specification (classification)
    +## 
    +## Computational engine: spark 
    +## 
    +## Model fit template:
    +## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(), 
    +##     type = "classification", seed = sample.int(10^5, 1))
    +

    Parameter translations

    diff --git a/docs/dev/reference/reexports.html b/docs/dev/reference/reexports.html index 823e3eba5..53b5fce63 100644 --- a/docs/dev/reference/reexports.html +++ b/docs/dev/reference/reexports.html @@ -47,7 +47,7 @@ genericsfit, fit_xy, tidy, varying_args - magrittr%&gt;% + magrittr%>% " /> @@ -149,7 +149,7 @@

    Objects exported from other packages

    These objects are imported from other packages. Follow the links below to see their documentation.

    -
    +
    generics

    fit, fit_xy, tidy, varying_args

    magrittr

    %>%

    diff --git a/docs/dev/reference/req_pkgs.html b/docs/dev/reference/req_pkgs.html new file mode 100644 index 000000000..ba98ca81e --- /dev/null +++ b/docs/dev/reference/req_pkgs.html @@ -0,0 +1,234 @@ + + + + + + + + +Determine required packages for a model — req_pkgs • parsnip + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Determine required packages for a model

    +
    + +
    req_pkgs(x, ...)
    +
    +# S3 method for model_spec
    +req_pkgs(x, ...)
    +
    +# S3 method for model_fit
    +req_pkgs(x, ...)
    + +

    Arguments

    + + + + + + + + + + +
    x

    A model specification or fit.

    ...

    Not used.

    + +

    Value

    + +

    A character string of package names (if any).

    +

    Details

    + +

    For a model specification, the engine must be set.

    +

    The list does not include the parsnip package.

    + +

    Examples

    +
    should_fail <- try(req_pkgs(linear_reg()), silent = TRUE)
    should_fail
    #> [1] "Error : Please set an engine.\n"
    #> attr(,"class")
    #> [1] "try-error"
    #> attr(,"condition")
    #> <error/rlang_error>
    #> Please set an engine.
    #> Backtrace:
    #>  1. base::tryCatch(...)
    #> 13. pkgdown::build_site(...)
    #> 14. pkgdown:::build_site_local(...)
    #> 15. pkgdown::build_reference(...)
    #> 16. purrr::map(...)
    #> 17. pkgdown:::.f(.x[[i]], ...)
    #> 18. pkgdown:::data_reference_topic(...)
    #> 19. pkgdown:::run_examples(...)
    #> 20. pkgdown:::highlight_examples(code, topic, env = env)
    #> 21. evaluate::evaluate(x, child_env(env), new_device = TRUE)
    #> 22. evaluate:::evaluate_call(...)
    #> 32. [ base::eval(...) ] with 1 more call
    #> 40. parsnip:::req_pkgs.model_spec(linear_reg())

    linear_reg() %>%
      set_engine("glmnet") %>%
      req_pkgs()
    #> [1] "glmnet"

    linear_reg() %>%
      set_engine("lm") %>%
      fit(mpg ~ ., data = mtcars) %>%
      req_pkgs()
    #> [1] "stats"
    +
    + +
    + + +
    +
    +

    parsnip is a part of the tidymodels ecosystem, a collection of modeling packages designed with common APIs and a shared philosophy.

    +
    + +
    +

    + Developed by Max Kuhn, Davis Vaughan. + Site built by pkgdown. +

    +
    + +
    +
    + + + + + + + + diff --git a/docs/dev/reference/set_new_model.html b/docs/dev/reference/set_new_model.html index 2b5b8af00..7c92f2cd3 100644 --- a/docs/dev/reference/set_new_model.html +++ b/docs/dev/reference/set_new_model.html @@ -169,7 +169,11 @@

    Tools to Register Models

    show_model_info(model)

    -pred_value_template(pre = NULL, post = NULL, func, ...)
    +pred_value_template(pre = NULL, post = NULL, func, ...)
    +
    +set_encoding(model, mode, eng, options)
    +
    +get_encoding(model)
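    A hedged sketch of calling the new accessor, assuming model is the model's name as a string (as with show_model_info()):

    # Inspect what is registered for a model, including the new encoding information
    show_model_info("boost_tree")
    get_encoding("boost_tree")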

    Arguments

    @@ -236,6 +240,14 @@

    Arg

    + + + + diff --git a/docs/dev/reference/surv_reg.html b/docs/dev/reference/surv_reg.html index 332f4aedc..53f56e486 100644 --- a/docs/dev/reference/surv_reg.html +++ b/docs/dev/reference/surv_reg.html @@ -229,26 +229,28 @@

    surv_reg() %>%
       set_engine("flexsurv") %>%
       set_mode("regression") %>%
    -  translate()

    ## Parametric Survival Regression Model Specification (regression)
    -## 
    -## Computational engine: flexsurv 
    -## 
    -## Model fit template:
    -## flexsurv::flexsurvreg(formula = missing_arg(), data = missing_arg(), 
    -##     weights = missing_arg())
    + translate()

    ## Parametric Survival Regression Model Specification (regression)
    +## 
    +## Computational engine: flexsurv 
    +## 
    +## Model fit template:
    +## flexsurv::flexsurvreg(formula = missing_arg(), data = missing_arg(), 
    +##     weights = missing_arg())
    +

    survival

    surv_reg() %>%
       set_engine("survival") %>%
       set_mode("regression") %>%
    -  translate()

    ## Parametric Survival Regression Model Specification (regression)
    -## 
    -## Computational engine: survival 
    -## 
    -## Model fit template:
    -## survival::survreg(formula = missing_arg(), data = missing_arg(), 
    -##     weights = missing_arg(), model = TRUE)
    + translate()

    ## Parametric Survival Regression Model Specification (regression)
    +## 
    +## Computational engine: survival 
    +## 
    +## Model fit template:
    +## survival::survreg(formula = missing_arg(), data = missing_arg(), 
    +##     weights = missing_arg(), model = TRUE)
    +

    Note that model = TRUE is needed to produce quantile predictions when there is a stratification variable and can be overridden in other cases.
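    A hedged sketch of overriding that default through set_engine() (only sensible when quantile predictions with a strata term are not needed):

    surv_reg() %>%
      set_engine("survival", model = FALSE) %>%
      translate()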

    diff --git a/docs/dev/reference/svm_poly.html b/docs/dev/reference/svm_poly.html index 6d8d510eb..831614806 100644 --- a/docs/dev/reference/svm_poly.html +++ b/docs/dev/reference/svm_poly.html @@ -256,21 +256,23 @@

    svm_poly() %>%
       set_engine("kernlab") %>%
       set_mode("regression") %>%
    -  translate()

    ## Polynomial Support Vector Machine Specification (regression)
    -## 
    -## Computational engine: kernlab 
    -## 
    -## Model fit template:
    -## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot")

    svm_poly() %>%
    +  translate()

    ## Polynomial Support Vector Machine Specification (regression)
    +## 
    +## Computational engine: kernlab 
    +## 
    +## Model fit template:
    +## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot")
    +

    svm_poly() %>%
       set_engine("kernlab") %>%
       set_mode("classification") %>%
    -  translate()

    ## Polynomial Support Vector Machine Specification (classification)
    -## 
    -## Computational engine: kernlab 
    -## 
    -## Model fit template:
    -## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot", 
    -##     prob.model = TRUE)
    + translate()

    ## Polynomial Support Vector Machine Specification (classification)
    +## 
    +## Computational engine: kernlab 
    +## 
    +## Model fit template:
    +## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot", 
    +##     prob.model = TRUE)
    +

    Parameter translations

    diff --git a/docs/dev/reference/svm_rbf.html b/docs/dev/reference/svm_rbf.html index f0690ecb4..355ae7815 100644 --- a/docs/dev/reference/svm_rbf.html +++ b/docs/dev/reference/svm_rbf.html @@ -246,43 +246,47 @@

    svm_rbf() %>%
       set_engine("kernlab") %>%
       set_mode("regression") %>%
    -  translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
    -## 
    -## Computational engine: kernlab 
    -## 
    -## Model fit template:
    -## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot")

    svm_rbf() %>%
    +  translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
    +## 
    +## Computational engine: kernlab 
    +## 
    +## Model fit template:
    +## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot")
    +

    svm_rbf() %>%
       set_engine("kernlab") %>%
       set_mode("classification") %>%
    -  translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
    -## 
    -## Computational engine: kernlab 
    -## 
    -## Model fit template:
    -## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot", 
    -##     prob.model = TRUE)
    + translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
    +## 
    +## Computational engine: kernlab 
    +## 
    +## Model fit template:
    +## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot", 
    +##     prob.model = TRUE)
    +

    liquidSVM

    svm_rbf() %>%
       set_engine("liquidSVM") %>%
       set_mode("regression") %>%
    -  translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
    -## 
    -## Computational engine: liquidSVM 
    -## 
    -## Model fit template:
    -## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1, 
    -##     threads = 0)

    svm_rbf() %>%
    +  translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
    +## 
    +## Computational engine: liquidSVM 
    +## 
    +## Model fit template:
    +## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1, 
    +##     threads = 0)
    +

    svm_rbf() %>%
       set_engine("liquidSVM") %>%
       set_mode("classification") %>%
    -  translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
    -## 
    -## Computational engine: liquidSVM 
    -## 
    -## Model fit template:
    -## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1, 
    -##     threads = 0)
    + translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
    +## 
    +## Computational engine: liquidSVM 
    +## 
    +## Model fit template:
    +## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1, 
    +##     threads = 0)
    +

Note that models created using the liquidSVM engine cannot be saved like conventional R objects. The fit slot of the model_fit object

diff --git a/docs/dev/sitemap.xml b/docs/dev/sitemap.xml
index 246afae93..72288e0eb 100644
--- a/docs/dev/sitemap.xml
+++ b/docs/dev/sitemap.xml
@@ -102,6 +102,9 @@
   https://parsnip.tidymodels.org/reference/repair_call.html
+
+  https://parsnip.tidymodels.org/reference/req_pkgs.html
+
   https://parsnip.tidymodels.org/reference/rpart_train.html

diff --git a/docs/reference/boost_tree.html b/docs/reference/boost_tree.html
index cbe58e3e9..e8629b7f9 100644
--- a/docs/reference/boost_tree.html
+++ b/docs/reference/boost_tree.html
@@ -311,9 +311,9 @@

    boost_tree() %&gt;%
    -  set_engine("xgboost") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    boost_tree() %>%
    +  set_engine("xgboost") %>%
    +  set_mode("regression") %>%
       translate()

    ## Boosted Tree Model Specification (regression)
     ## 
     ## Computational engine: xgboost 
    @@ -321,9 +321,9 @@ 

    boost_tree() %&gt;%
    -  set_engine("xgboost") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    boost_tree() %>%
    +  set_engine("xgboost") %>%
    +  set_mode("classification") %>%
       translate()

    ## Boosted Tree Model Specification (classification)
     ## 
     ## Computational engine: xgboost 
    @@ -335,9 +335,9 @@ 

    boost_tree() %&gt;%
    -  set_engine("C5.0") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    boost_tree() %>%
    +  set_engine("C5.0") %>%
    +  set_mode("classification") %>%
       translate()

    ## Boosted Tree Model Specification (classification)
     ## 
     ## Computational engine: C5.0 
    @@ -350,9 +350,9 @@ 

    C5.0

    predictors to be converted to indicator variables.

    spark

    -

    boost_tree() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    boost_tree() %>%
    +  set_engine("spark") %>%
    +  set_mode("regression") %>%
       translate()

    ## Boosted Tree Model Specification (regression)
     ## 
     ## Computational engine: spark 
    @@ -360,9 +360,9 @@ 

    spark

## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
##     type = "regression", seed = sample.int(10^5, 1))
-

    boost_tree() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    boost_tree() %>%
    +  set_engine("spark") %>%
    +  set_mode("classification") %>%
       translate()

    ## Boosted Tree Model Specification (classification)
     ## 
     ## Computational engine: spark 
    diff --git a/docs/reference/decision_tree.html b/docs/reference/decision_tree.html
    index 8777f5dd2..b952b2811 100644
    --- a/docs/reference/decision_tree.html
    +++ b/docs/reference/decision_tree.html
    @@ -267,18 +267,18 @@ 

    decision_tree() %&gt;%
    -  set_engine("rpart") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    decision_tree() %>%
    +  set_engine("rpart") %>%
    +  set_mode("regression") %>%
       translate()

    ## Decision Tree Model Specification (regression)
     ## 
     ## Computational engine: rpart 
     ## 
     ## Model fit template:
     ## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
    -

    decision_tree() %&gt;%
    -  set_engine("rpart") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    decision_tree() %>%
    +  set_engine("rpart") %>%
    +  set_mode("classification") %>%
       translate()

    ## Decision Tree Model Specification (classification)
     ## 
     ## Computational engine: rpart 
    @@ -291,9 +291,9 @@ 

    decision_tree() %&gt;%
    -  set_engine("C5.0") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    decision_tree() %>%
    +  set_engine("C5.0") %>%
    +  set_mode("classification") %>%
       translate()

    ## Decision Tree Model Specification (classification)
     ## 
     ## Computational engine: C5.0 
    @@ -307,9 +307,9 @@ 

    C5.0

    predictors to be converted to indicator variables.

    spark

    -

    decision_tree() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    decision_tree() %>%
    +  set_engine("spark") %>%
    +  set_mode("regression") %>%
       translate()

    ## Decision Tree Model Specification (regression)
     ## 
     ## Computational engine: spark 
    @@ -317,9 +317,9 @@ 

    spark

## Model fit template:
## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(),
##     seed = sample.int(10^5, 1))
-

    decision_tree() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    decision_tree() %>%
    +  set_engine("spark") %>%
    +  set_mode("classification") %>%
       translate()

    ## Decision Tree Model Specification (classification)
     ## 
     ## Computational engine: spark 
    diff --git a/docs/reference/linear_reg.html b/docs/reference/linear_reg.html
    index fa38696b9..9e59e11fe 100644
    --- a/docs/reference/linear_reg.html
    +++ b/docs/reference/linear_reg.html
    @@ -257,9 +257,9 @@ 

    linear_reg() %&gt;%
    -  set_engine("lm") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    linear_reg() %>%
    +  set_engine("lm") %>%
    +  set_mode("regression") %>%
       translate()

    ## Linear Regression Model Specification (regression)
     ## 
     ## Computational engine: lm 
    @@ -270,9 +270,9 @@ 

    linear_reg() %&gt;%
    -  set_engine("glmnet") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    linear_reg() %>%
    +  set_engine("glmnet") %>%
    +  set_mode("regression") %>%
       translate()

    ## Linear Regression Model Specification (regression)
     ## 
     ## Computational engine: glmnet 
    @@ -293,9 +293,9 @@ 

    glmnet

    results.

    stan

    -

    linear_reg() %&gt;%
    -  set_engine("stan") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    linear_reg() %>%
    +  set_engine("stan") %>%
    +  set_mode("regression") %>%
       translate()

    ## Linear Regression Model Specification (regression)
     ## 
     ## Computational engine: stan 
    @@ -314,9 +314,9 @@ 

    stan

    predictive distribution as appropriate) is returned.

    spark

    -

    linear_reg() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    linear_reg() %>%
    +  set_engine("spark") %>%
    +  set_mode("regression") %>%
       translate()

    ## Linear Regression Model Specification (regression)
     ## 
     ## Computational engine: spark 
    @@ -328,9 +328,9 @@ 

    spark

    keras

    -

    linear_reg() %&gt;%
    -  set_engine("keras") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    linear_reg() %>%
    +  set_engine("keras") %>%
    +  set_mode("regression") %>%
       translate()

    ## Linear Regression Model Specification (regression)
     ## 
     ## Computational engine: keras 
    diff --git a/docs/reference/logistic_reg.html b/docs/reference/logistic_reg.html
    index 825f21a16..4fd705671 100644
    --- a/docs/reference/logistic_reg.html
    +++ b/docs/reference/logistic_reg.html
    @@ -255,9 +255,9 @@ 

    logistic_reg() %&gt;%
    -  set_engine("glm") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    logistic_reg() %>%
    +  set_engine("glm") %>%
    +  set_mode("classification") %>%
       translate()

    ## Logistic Regression Model Specification (classification)
     ## 
     ## Computational engine: glm 
    @@ -269,9 +269,9 @@ 

    logistic_reg() %&gt;%
    -  set_engine("glmnet") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    logistic_reg() %>%
    +  set_engine("glmnet") %>%
    +  set_mode("classification") %>%
       translate()

    ## Logistic Regression Model Specification (classification)
     ## 
     ## Computational engine: glmnet 
    @@ -292,9 +292,9 @@ 

    glmnet

    results.

    stan

    -

    logistic_reg() %&gt;%
    -  set_engine("stan") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    logistic_reg() %>%
    +  set_engine("stan") %>%
    +  set_mode("classification") %>%
       translate()

    ## Logistic Regression Model Specification (classification)
     ## 
     ## Computational engine: stan 
    @@ -313,9 +313,9 @@ 

    stan

    predictive distribution as appropriate) is returned.

    spark

    -

    logistic_reg() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    logistic_reg() %>%
    +  set_engine("spark") %>%
    +  set_mode("classification") %>%
       translate()

    ## Logistic Regression Model Specification (classification)
     ## 
     ## Computational engine: spark 
    @@ -327,9 +327,9 @@ 

    spark

    keras

    -

    logistic_reg() %&gt;%
    -  set_engine("keras") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    logistic_reg() %>%
    +  set_engine("keras") %>%
    +  set_mode("classification") %>%
       translate()

    ## Logistic Regression Model Specification (classification)
     ## 
     ## Computational engine: keras 
    diff --git a/docs/reference/mars.html b/docs/reference/mars.html
    index f23769579..b684871ee 100644
    --- a/docs/reference/mars.html
    +++ b/docs/reference/mars.html
    @@ -248,9 +248,9 @@ 

    mars() %&gt;%
    -  set_engine("earth") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    mars() %>%
    +  set_engine("earth") %>%
    +  set_mode("regression") %>%
       translate()

    ## MARS Model Specification (regression)
     ## 
     ## Computational engine: earth 
    @@ -258,9 +258,9 @@ 

    mars() %&gt;%
    -  set_engine("earth") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    mars() %>%
    +  set_engine("earth") %>%
    +  set_mode("classification") %>%
       translate()

    ## MARS Model Specification (classification)
     ## 
     ## Engine-Specific Arguments:
    diff --git a/docs/reference/mlp.html b/docs/reference/mlp.html
    index eaea05f85..0af81c4c1 100644
    --- a/docs/reference/mlp.html
    +++ b/docs/reference/mlp.html
    @@ -276,18 +276,18 @@ 

    mlp() %&gt;%
    -  set_engine("keras") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    mlp() %>%
    +  set_engine("keras") %>%
    +  set_mode("regression") %>%
       translate()

    ## Single Layer Neural Network Specification (regression)
     ## 
     ## Computational engine: keras 
     ## 
     ## Model fit template:
     ## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())
    -

    mlp() %&gt;%
    -  set_engine("keras") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    mlp() %>%
    +  set_engine("keras") %>%
    +  set_mode("classification") %>%
       translate()

    ## Single Layer Neural Network Specification (classification)
     ## 
     ## Computational engine: keras 
    @@ -300,9 +300,9 @@ 

    mlp() %&gt;%
    -  set_engine("nnet") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    mlp() %>%
    +  set_engine("nnet") %>%
    +  set_mode("regression") %>%
       translate()

    ## Single Layer Neural Network Specification (regression)
     ## 
     ## Main Arguments:
    @@ -313,9 +313,9 @@ 

    nnet

## Model fit template:
## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     size = 5, trace = FALSE, linout = TRUE)
-

    mlp() %&gt;%
    -  set_engine("nnet") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    mlp() %>%
    +  set_engine("nnet") %>%
    +  set_mode("classification") %>%
       translate()

    ## Single Layer Neural Network Specification (classification)
     ## 
     ## Main Arguments:
    diff --git a/docs/reference/multinom_reg.html b/docs/reference/multinom_reg.html
    index cbe9ed65c..b35bf4ab1 100644
    --- a/docs/reference/multinom_reg.html
    +++ b/docs/reference/multinom_reg.html
    @@ -254,9 +254,9 @@ 

    multinom_reg() %&gt;%
    -  set_engine("glmnet") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    multinom_reg() %>%
    +  set_engine("glmnet") %>%
    +  set_mode("classification") %>%
       translate()

    ## Multinomial Regression Model Specification (classification)
     ## 
     ## Computational engine: glmnet 
    @@ -277,9 +277,9 @@ 

    multinom_reg() %&gt;%
    -  set_engine("nnet") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    multinom_reg() %>%
    +  set_engine("nnet") %>%
    +  set_mode("classification") %>%
       translate()

    ## Multinomial Regression Model Specification (classification)
     ## 
     ## Computational engine: nnet 
    @@ -291,9 +291,9 @@ 

    nnet

    spark

    -

    multinom_reg() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    multinom_reg() %>%
    +  set_engine("spark") %>%
    +  set_mode("classification") %>%
       translate()

    ## Multinomial Regression Model Specification (classification)
     ## 
     ## Computational engine: spark 
    @@ -305,9 +305,9 @@ 

    spark

    keras

    -

    multinom_reg() %&gt;%
    -  set_engine("keras") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    multinom_reg() %>%
    +  set_engine("keras") %>%
    +  set_mode("classification") %>%
       translate()

    ## Multinomial Regression Model Specification (classification)
     ## 
     ## Computational engine: keras 
    diff --git a/docs/reference/nearest_neighbor.html b/docs/reference/nearest_neighbor.html
    index a4c350468..b67dcfc13 100644
    --- a/docs/reference/nearest_neighbor.html
    +++ b/docs/reference/nearest_neighbor.html
    @@ -222,9 +222,9 @@ 

    nearest_neighbor() %&gt;%
    -  set_engine("kknn") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    nearest_neighbor() %>%
    +  set_engine("kknn") %>%
    +  set_mode("regression") %>%
       translate()

    ## K-Nearest Neighbor Model Specification (regression)
     ## 
     ## Computational engine: kknn 
    @@ -232,9 +232,9 @@ 

    nearest_neighbor() %&gt;%
    -  set_engine("kknn") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    nearest_neighbor() %>%
    +  set_engine("kknn") %>%
    +  set_mode("classification") %>%
       translate()

    ## K-Nearest Neighbor Model Specification (classification)
     ## 
     ## Computational engine: kknn 
    diff --git a/docs/reference/null_model.html b/docs/reference/null_model.html
    index 4be28b6e1..2b7a457af 100644
    --- a/docs/reference/null_model.html
    +++ b/docs/reference/null_model.html
    @@ -171,18 +171,18 @@ 

    null_model() %&gt;%
    -  set_engine("parsnip") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    null_model() %>%
    +  set_engine("parsnip") %>%
    +  set_mode("regression") %>%
       translate()

    ## Model Specification (regression)
     ## 
     ## Computational engine: parsnip 
     ## 
     ## Model fit template:
     ## nullmodel(x = missing_arg(), y = missing_arg())
    -

    null_model() %&gt;%
    -  set_engine("parsnip") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    null_model() %>%
    +  set_engine("parsnip") %>%
    +  set_mode("classification") %>%
       translate()

    ## Model Specification (classification)
     ## 
     ## Computational engine: parsnip 
    diff --git a/docs/reference/rand_forest.html b/docs/reference/rand_forest.html
    index d4a186176..884042d48 100644
    --- a/docs/reference/rand_forest.html
    +++ b/docs/reference/rand_forest.html
    @@ -256,9 +256,9 @@ 

    rand_forest() %&gt;%
    -  set_engine("ranger") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("ranger") %>%
    +  set_mode("regression") %>%
       translate()

    ## Random Forest Model Specification (regression)
     ## 
     ## Computational engine: ranger 
    @@ -267,9 +267,9 @@ 

    rand_forest() %&gt;%
    -  set_engine("ranger") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("ranger") %>%
    +  set_mode("classification") %>%
       translate()

    ## Random Forest Model Specification (classification)
     ## 
     ## Computational engine: ranger 
    @@ -288,18 +288,18 @@ 

    rand_forest() %&gt;%
    -  set_engine("randomForest") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("randomForest") %>%
    +  set_mode("regression") %>%
       translate()

    ## Random Forest Model Specification (regression)
     ## 
     ## Computational engine: randomForest 
     ## 
     ## Model fit template:
     ## randomForest::randomForest(x = missing_arg(), y = missing_arg())
    -

    rand_forest() %&gt;%
    -  set_engine("randomForest") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("randomForest") %>%
    +  set_mode("classification") %>%
       translate()

    ## Random Forest Model Specification (classification)
     ## 
     ## Computational engine: randomForest 
    @@ -313,9 +313,9 @@ 

    randomForest

    not require factor predictors to be converted to indicator variables.

    spark

    -

    rand_forest() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("spark") %>%
    +  set_mode("regression") %>%
       translate()

    ## Random Forest Model Specification (regression)
     ## 
     ## Computational engine: spark 
    @@ -323,9 +323,9 @@ 

    spark

## Model fit template:
## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(),
##     type = "regression", seed = sample.int(10^5, 1))
-

    rand_forest() %&gt;%
    -  set_engine("spark") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    rand_forest() %>%
    +  set_engine("spark") %>%
    +  set_mode("classification") %>%
       translate()

    ## Random Forest Model Specification (classification)
     ## 
     ## Computational engine: spark 
    diff --git a/docs/reference/reexports.html b/docs/reference/reexports.html
    index b1041fb84..f4e18803c 100644
    --- a/docs/reference/reexports.html
    +++ b/docs/reference/reexports.html
    @@ -47,7 +47,7 @@
     
       genericsfit, fit_xy, tidy, varying_args
     
    -  magrittr%&gt;%
    +  magrittr%>%
     
     " />
     
    diff --git a/docs/reference/surv_reg.html b/docs/reference/surv_reg.html
    index 1912b698a..65040046b 100644
    --- a/docs/reference/surv_reg.html
    +++ b/docs/reference/surv_reg.html
    @@ -226,9 +226,9 @@ 

    surv_reg() %&gt;%
    -  set_engine("flexsurv") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    surv_reg() %>%
    +  set_engine("flexsurv") %>%
    +  set_mode("regression") %>%
       translate()

    ## Parametric Survival Regression Model Specification (regression)
     ## 
     ## Computational engine: flexsurv 
    @@ -240,9 +240,9 @@ 

    surv_reg() %&gt;%
    -  set_engine("survival") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    surv_reg() %>%
    +  set_engine("survival") %>%
    +  set_mode("regression") %>%
       translate()

    ## Parametric Survival Regression Model Specification (regression)
     ## 
     ## Computational engine: survival 
    diff --git a/docs/reference/svm_poly.html b/docs/reference/svm_poly.html
    index ca08b9603..110492025 100644
    --- a/docs/reference/svm_poly.html
    +++ b/docs/reference/svm_poly.html
    @@ -253,18 +253,18 @@ 

    svm_poly() %&gt;%
    -  set_engine("kernlab") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    svm_poly() %>%
    +  set_engine("kernlab") %>%
    +  set_mode("regression") %>%
       translate()

    ## Polynomial Support Vector Machine Specification (regression)
     ## 
     ## Computational engine: kernlab 
     ## 
     ## Model fit template:
     ## kernlab::ksvm(x = missing_arg(), y = missing_arg(), kernel = "polydot")
    -

    svm_poly() %&gt;%
    -  set_engine("kernlab") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    svm_poly() %>%
    +  set_engine("kernlab") %>%
    +  set_mode("classification") %>%
       translate()

    ## Polynomial Support Vector Machine Specification (classification)
     ## 
     ## Computational engine: kernlab 
    diff --git a/docs/reference/svm_rbf.html b/docs/reference/svm_rbf.html
    index cb0c5f809..3c108fe5c 100644
    --- a/docs/reference/svm_rbf.html
    +++ b/docs/reference/svm_rbf.html
    @@ -243,18 +243,18 @@ 

    svm_rbf() %&gt;%
    -  set_engine("kernlab") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    svm_rbf() %>%
    +  set_engine("kernlab") %>%
    +  set_mode("regression") %>%
       translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
     ## 
     ## Computational engine: kernlab 
     ## 
     ## Model fit template:
     ## kernlab::ksvm(x = missing_arg(), y = missing_arg(), kernel = "rbfdot")
    -

    svm_rbf() %&gt;%
    -  set_engine("kernlab") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    svm_rbf() %>%
    +  set_engine("kernlab") %>%
    +  set_mode("classification") %>%
       translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
     ## 
     ## Computational engine: kernlab 
    @@ -266,9 +266,9 @@ 

    svm_rbf() %&gt;%
    -  set_engine("liquidSVM") %&gt;%
    -  set_mode("regression") %&gt;%
    +

    svm_rbf() %>%
    +  set_engine("liquidSVM") %>%
    +  set_mode("regression") %>%
       translate()

    ## Radial Basis Function Support Vector Machine Specification (regression)
     ## 
     ## Computational engine: liquidSVM 
    @@ -276,9 +276,9 @@ 

    liquidSVM

## Model fit template:
## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1,
##     threads = 0)
-

    svm_rbf() %&gt;%
    -  set_engine("liquidSVM") %&gt;%
    -  set_mode("classification") %&gt;%
    +

    svm_rbf() %>%
    +  set_engine("liquidSVM") %>%
    +  set_mode("classification") %>%
       translate()

    ## Radial Basis Function Support Vector Machine Specification (classification)
     ## 
     ## Computational engine: liquidSVM 
    diff --git a/man/req_pkgs.Rd b/man/req_pkgs.Rd
    new file mode 100644
    index 000000000..790da1087
    --- /dev/null
    +++ b/man/req_pkgs.Rd
    @@ -0,0 +1,43 @@
    +% Generated by roxygen2: do not edit by hand
    +% Please edit documentation in R/req_pkgs.R
    +\name{req_pkgs}
    +\alias{req_pkgs}
    +\alias{req_pkgs.model_spec}
    +\alias{req_pkgs.model_fit}
    +\title{Determine required packages for a model}
    +\usage{
    +req_pkgs(x, ...)
    +
    +\method{req_pkgs}{model_spec}(x, ...)
    +
    +\method{req_pkgs}{model_fit}(x, ...)
    +}
    +\arguments{
    +\item{x}{A model specification or fit.}
    +
    +\item{...}{Not used.}
    +}
    +\value{
    +A character string of package names (if any).
    +}
    +\description{
    +Determine required packages for a model
    +}
    +\details{
    +For a model specification, the engine must be set.
    +
    +The list does not include the \code{parsnip} package.
    +}
    +\examples{
    +should_fail <- try(req_pkgs(linear_reg()), silent = TRUE)
    +should_fail
    +
    +linear_reg() \%>\%
    +  set_engine("glmnet") \%>\%
    +  req_pkgs()
    +
    +linear_reg() \%>\%
    +  set_engine("lm") \%>\%
    +  fit(mpg ~ ., data = mtcars) \%>\%
    +  req_pkgs()
    +}
    diff --git a/tests/testthat/mars_model.RData b/tests/testthat/mars_model.RData
    new file mode 100644
    index 000000000..6a534acc1
    Binary files /dev/null and b/tests/testthat/mars_model.RData differ
    diff --git a/tests/testthat/test_packages.R b/tests/testthat/test_packages.R
    new file mode 100644
    index 000000000..d4e951b78
    --- /dev/null
    +++ b/tests/testthat/test_packages.R
    @@ -0,0 +1,41 @@
    +
    +context("checking for packages")
    +load(test_path("mars_model.RData"))
    +
    +# ------------------------------------------------------------------------------
    +
    +test_that('required packages', {
    +
    +  expect_error(req_pkgs(linear_reg()), "Please set an engine")
    +
    +  glmn <-
    +    linear_reg() %>%
    +    set_engine("glmnet") %>%
    +    req_pkgs()
    +  expect_equal(glmn, "glmnet")
    +
    +  lm_fit <-
    +    linear_reg() %>%
    +    set_engine("lm") %>%
    +    fit(mpg ~ ., data = mtcars) %>%
    +    req_pkgs()
    +  expect_equal(lm_fit, "stats")
    +})
    +
    +# ------------------------------------------------------------------------------
    +
    +test_that('missing packages', {
    +  has_earth <- parsnip:::is_installed("earth")
    +
+  if (has_earth) {
+    expect_error(predict(mars_model, mtcars[1:3, -1]), regexp = NA)
+    expect_true(any(names(sessionInfo()$loadedOnly) == "earth"))
+  } else {
+    expect_error(predict(mars_model, mtcars[1:3, -1]), regexp = "earth")
+  }
+
    +  mars_model$spec$method$libs <- "rootveggie"
    +  expect_error(predict(mars_model, mtcars[1:3, -1]), regexp = "rootveggie")
    +
    +})
    +
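An aside on usage (not part of this patch): the tests above rely on req_pkgs() reporting an engine's dependencies, and callers can use the same function to check those dependencies up front. The snippet below is a minimal sketch using only base R plus the req_pkgs() interface added here; the object names are hypothetical.

    # Minimal sketch (not from this patch): check that an engine's required
    # packages are installed before fitting or predicting.
    library(parsnip)

    spec <- linear_reg() %>% set_engine("glmnet")
    pkgs <- req_pkgs(spec)   # "glmnet" for this engine
    installed <- vapply(pkgs, requireNamespace, logical(1), quietly = TRUE)
    if (any(!installed)) {
      install.packages(pkgs[!installed])
    }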
    

    ...

    Optional arguments that should be passed into the args slot for prediction objects.
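For context (not part of this patch): the args slot referred to above lives in the prediction module that an engine registers with parsnip, and its elements are quoted so they can be spliced into the engine's prediction call at predict time. Below is a hedged sketch of such a module, following the shape used in parsnip's "add a model" documentation; pred_value is a hypothetical name.

    # Hedged sketch (not from this patch): a prediction module whose `args`
    # slot supplies quoted arguments for the engine's prediction function.
    pred_value <- list(
      pre  = NULL,                  # optional pre-processing hook
      post = NULL,                  # optional post-processing hook
      func = c(fun = "predict"),    # engine function to call
      args = list(
        object  = quote(object$fit),
        newdata = quote(new_data)
      )
    )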

    options

A list of options for engine-specific encodings. Currently, the option implemented is predictor_indicators, which tells parsnip whether the pre-processing should make indicator/dummy variables from factor predictors. This only affects cases when fit.model_spec() is used and the underlying model has an x/y interface.
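To make the predictor_indicators behavior concrete, here is a small base-R illustration (not part of this patch) of the two encodings: expanding a factor into indicator columns versus passing the factor through untouched. parsnip performs the expansion internally when the option is enabled; the data frame below is hypothetical.

    # Illustration only (not from this patch): indicator/dummy expansion of a
    # factor predictor versus leaving the factor as-is.
    dat <- data.frame(y = c(1, 2, 3, 4), grp = factor(c("a", "b", "a", "c")))

    # With indicators: the factor becomes 0/1 columns (plus an intercept).
    model.matrix(y ~ ., data = dat)

    # Without indicators: the factor column is handed to the engine unchanged,
    # which some engines (e.g. tree-based models) handle natively.
    dat["grp"]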

    arg