From 884cc7a7399287a56b30ef5ccd91ca2612081591 Mon Sep 17 00:00:00 2001 From: "Simon P. Couch" Date: Fri, 1 Mar 2024 09:37:30 -0600 Subject: [PATCH] require optional arguments to be named (#863) * move dots in `augment.tune_results()` * move dots in `collect_*()` functions * move dots in `compute_metrics()` * move dots in developer-focused functions * move dots in `autoplot.tune_results()` * add dots to relevant developer-focused functions * move dots in `*_best()` * add dots to `conf_mat_resampled()` * check dots are empty in functions that newly have them * move dots in `first_eval_time()` * check existing but newly moved dots are empty * move dots in `fit_best()` * revert "move dots in `autoplot.tune_results()`" Those dots are actually passed on to internal functions and aren't just to enforce naming arguments. * add NEWS entry * correct ref, note exception * re`document()` * name `collect_predictions()` argument * write out ad-hoc dots check --- NEWS.md | 5 +- R/augment.R | 45 ++++------------ R/checks.R | 4 +- R/collect.R | 18 +++++-- R/compute_metrics.R | 5 +- R/conf_mat_resampled.R | 4 +- R/fit_best.R | 10 ++-- R/load_ns.R | 9 ++-- R/metric-selection.R | 15 +++--- R/select_best.R | 16 +++--- R/tune_bayes.R | 5 +- man/augment.tune_results.Rd | 6 +-- man/choose_metric.Rd | 9 ++-- man/collect_predictions.Rd | 10 ++-- man/compute_metrics.Rd | 2 +- man/conf_mat_resampled.Rd | 4 +- man/empty_ellipses.Rd | 10 ++-- man/fit_best.Rd | 6 +-- man/load_pkgs.Rd | 2 +- man/show_best.Rd | 4 +- tests/testthat/_snaps/augment.md | 12 +++-- tests/testthat/_snaps/collect.md | 52 ------------------- tests/testthat/_snaps/conf-mat-resampled.md | 9 ++-- .../_snaps/eval-time-single-selection.md | 4 +- tests/testthat/_snaps/fit_best.md | 4 +- tests/testthat/test-collect.R | 2 +- tests/testthat/test-conf-mat-resampled.R | 8 +-- .../test-eval-time-single-selection.R | 8 +-- 28 files changed, 123 insertions(+), 165 deletions(-) diff --git a/NEWS.md b/NEWS.md index 6c89dd7d8..ca8c00302 100644 --- a/NEWS.md +++ b/NEWS.md @@ -45,10 +45,11 @@ * For iterative optimization routines, `autoplot()` will use integer breaks when `type = "performance"` or `type = "parameters"`. -## Breaking Change +## Breaking Changes -* Several functions gain an `eval_time` argument for the evaluation time of dynamic metrics for censored regression. The placement of the argument breaks passing-by-position for one or more other arguments to `fit_best.tune_results()`, `show_best.tune_results()`, and the developer-focused `check_initial()` (#857). +* Several functions gained an `eval_time` argument for the evaluation time of dynamic metrics for censored regression. The placement of the argument breaks passing-by-position for one or more other arguments to `autoplot.tune_results()` and the developer-focused `check_initial()` (#857). +* Ellipses (...) are now used consistently in the package to require optional arguments to be named. For functions that previously had ellipses at the end of the function signature, they have been moved to follow the last argument without a default value: this applies to `augment.tune_results()`, `collect_predictions.tune_results()`, `collect_metrics.tune_results()`, and the developer-focused `estimate_tune_results()`, `load_pkgs()`, and `encode_set()`. Several other functions that previously did not have ellipses in their signatures gained them: this applies to `conf_mat_resampled()` and the developer-focused `check_workflow()`. 
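As an illustrative sketch of the new convention (using `svm_results` as a placeholder `tune_results` object created with `save_pred = TRUE`, mirroring the package tests):

```r
# previously accepted by position; now errors because `...` must be empty
# conf_mat_resampled(svm_results, select_best(svm_results, metric = "accuracy"))

# optional arguments are supplied by name
conf_mat_resampled(
  svm_results,
  parameters = select_best(svm_results, metric = "accuracy"),
  tidy = TRUE
)
```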
Optional arguments previously passed by position will now error informatively prompting them to be named. These changes don't apply in cases when the ellipses are currently in use to forward arguments to other functions (#863). # tune 1.1.2 diff --git a/R/augment.R b/R/augment.R index 8c1bebdcf..2ca56fc04 100644 --- a/R/augment.R +++ b/R/augment.R @@ -7,12 +7,12 @@ #' @param x An object resulting from one of the `tune_*()` functions, #' `fit_resamples()`, or `last_fit()`. The control specifications for these #' objects should have used the option `save_pred = TRUE`. +#' @param ... Not currently used. #' @param parameters A data frame with a single row that indicates what #' tuning parameters should be used to generate the predictions (for `tune_*()` -#' objects only). If `NULL`, `select_best(x)` will be used with the first -#' metric and, if applicable, the first evaluation time point, used to +#' objects only). If `NULL`, `select_best(x)` will be used with the first +#' metric and, if applicable, the first evaluation time point, used to #' create `x`. -#' @param ... Not currently used. #' @return A data frame with one or more additional columns for model #' predictions. #' @@ -34,24 +34,15 @@ #' results. #' #' @export -augment.tune_results <- function(x, parameters = NULL, ...) { - dots <- rlang::list2(...) - if (length(dots) > 0) { - rlang::abort( - paste( - "The only two arguments for `augment.tune_results()` are", - "'x' and 'parameters'. Others were passed:", - paste0("'", names(dots), "'", collapse = ", ") - ) - ) - } +augment.tune_results <- function(x, ..., parameters = NULL) { + rlang::check_dots_empty() # check/determine best settings if (is.null(parameters)) { obj_fun <- .get_tune_metric_names(x)[1] obj_eval_time <- choose_eval_time( - x, - metric = obj_fun, + x, + metric = obj_fun, eval_time = NULL, quietly = TRUE ) @@ -70,16 +61,7 @@ augment.tune_results <- function(x, parameters = NULL, ...) { #' @rdname augment.tune_results #' @export augment.resample_results <- function(x, ...) { - dots <- rlang::list2(...) - if (length(dots) > 0) { - rlang::abort( - paste( - "The only argument for `augment.fit_resamples()` is", - "'x'. Others were passed:", - paste0("'", names(dots), "'", collapse = ", ") - ) - ) - } + rlang::check_dots_empty() pred <- collect_predictions(x, summarize = TRUE) y_nm <- .get_tune_outcome_names(x) @@ -91,16 +73,7 @@ augment.resample_results <- function(x, ...) { #' @rdname augment.tune_results #' @export augment.last_fit <- function(x, ...) { - dots <- rlang::list2(...) - if (length(dots) > 0) { - rlang::abort( - paste( - "The only argument for `augment.last_fit()` is", - "'x'. Others were passed:", - paste0("'", names(dots), "'", collapse = ", ") - ) - ) - } + rlang::check_dots_empty() pred <- collect_predictions(x, summarize = TRUE) pred$.row <- 1:nrow(pred) diff --git a/R/checks.R b/R/checks.R index a72175226..4d3d6ebeb 100644 --- a/R/checks.R +++ b/R/checks.R @@ -275,7 +275,7 @@ check_param_objects <- function(pset) { #' @keywords internal #' @rdname empty_ellipses #' @param check_dials A logical for check for a NULL parameter object. 
-check_workflow <- function(x, pset = NULL, check_dials = FALSE, call = caller_env()) { +check_workflow <- function(x, ..., pset = NULL, check_dials = FALSE, call = caller_env()) { if (!inherits(x, "workflow")) { rlang::abort("The `object` argument should be a 'workflow' object.") } @@ -288,6 +288,8 @@ check_workflow <- function(x, pset = NULL, check_dials = FALSE, call = caller_en rlang::abort("A parsnip model is required.") } + rlang::check_dots_empty(call = call) + if (check_dials) { if (is.null(pset)) { pset <- hardhat::extract_parameter_set_dials(x) diff --git a/R/collect.R b/R/collect.R index 30f7f8b4e..d17ae8513 100644 --- a/R/collect.R +++ b/R/collect.R @@ -3,6 +3,7 @@ #' @param x The results of [tune_grid()], [tune_bayes()], [fit_resamples()], #' or [last_fit()]. For [collect_predictions()], the control option `save_pred #' = TRUE` should have been used. +#' @param ... Not currently used. #' @param summarize A logical; should metrics be summarized over resamples #' (`TRUE`) or return the values for each individual resample. Note that, if `x` #' is created by [last_fit()], `summarize` has no effect. For the other object @@ -17,7 +18,6 @@ #' each metric has its own column and the `n` and `std_err` columns are removed, #' if they exist. #' -#' @param ... Not currently used. #' @return A tibble. The column names depend on the results and the mode of the #' model. #' @@ -120,7 +120,11 @@ #' #' collect_predictions(resampled) %>% arrange(.row) #' collect_predictions(resampled, summarize = TRUE) %>% arrange(.row) -#' collect_predictions(resampled, summarize = TRUE, grid[1, ]) %>% arrange(.row) +#' collect_predictions( +#' resampled, +#' summarize = TRUE, +#' parameters = grid[1, ] +#' ) %>% arrange(.row) #' #' collect_extracts(resampled) #' @@ -139,7 +143,9 @@ collect_predictions.default <- function(x, ...) { #' @export #' @rdname collect_predictions -collect_predictions.tune_results <- function(x, summarize = FALSE, parameters = NULL, ...) { +collect_predictions.tune_results <- function(x, ..., summarize = FALSE, parameters = NULL) { + rlang::check_dots_empty() + names <- colnames(x) coll_col <- ".predictions" @@ -454,7 +460,8 @@ collect_metrics.default <- function(x, ...) { #' @export #' @rdname collect_predictions -collect_metrics.tune_results <- function(x, summarize = TRUE, type = c("long", "wide"), ...) { +collect_metrics.tune_results <- function(x, ..., summarize = TRUE, type = c("long", "wide")) { + rlang::check_dots_empty() rlang::arg_match0(type, values = c("long", "wide")) if (inherits(x, "last_fit")) { @@ -551,7 +558,8 @@ collector <- function(x, coll_col = ".predictions") { #' @export #' @keywords internal #' @rdname empty_ellipses -estimate_tune_results <- function(x, col_name = ".metrics", ...) { +estimate_tune_results <- function(x, ..., col_name = ".metrics") { + rlang::check_dots_empty() param_names <- .get_tune_parameter_names(x) id_names <- grep("^id", names(x), value = TRUE) group_cols <- .get_extra_col_names(x) diff --git a/R/compute_metrics.R b/R/compute_metrics.R index bd434b8b3..64d105d3e 100644 --- a/R/compute_metrics.R +++ b/R/compute_metrics.R @@ -83,9 +83,10 @@ compute_metrics.default <- function(x, #' @rdname compute_metrics compute_metrics.tune_results <- function(x, metrics, + ..., summarize = TRUE, - event_level = "first", - ...) 
{ + event_level = "first") { + rlang::check_dots_empty() if (!".predictions" %in% names(x)) { rlang::abort(paste0( "`x` must have been generated with the ", diff --git a/R/conf_mat_resampled.R b/R/conf_mat_resampled.R index bc8780fac..eb563e324 100644 --- a/R/conf_mat_resampled.R +++ b/R/conf_mat_resampled.R @@ -5,6 +5,7 @@ #' #' @param x An object with class `tune_results` that was used with a #' classification model that was run with `control_*(save_pred = TRUE)`. +#' @param ... Currently unused, must be empty. #' @param parameters A tibble with a single tuning parameter combination. Only #' one tuning parameter combination (if any were used) is allowed here. #' @param tidy Should the results come back in a tibble (`TRUE`) or a `conf_mat` @@ -30,7 +31,8 @@ #' conf_mat_resampled(res) #' conf_mat_resampled(res, tidy = FALSE) #' @export -conf_mat_resampled <- function(x, parameters = NULL, tidy = TRUE) { +conf_mat_resampled <- function(x, ..., parameters = NULL, tidy = TRUE) { + rlang::check_dots_empty() if (!inherits(x, "tune_results")) { rlang::abort( "The first argument needs to be an object with class 'tune_results'." diff --git a/R/fit_best.R b/R/fit_best.R index d22be22eb..370470819 100644 --- a/R/fit_best.R +++ b/R/fit_best.R @@ -6,6 +6,7 @@ #' @param x The results of class `tune_results` (coming from functions such as #' [tune_grid()], [tune_bayes()], etc). The control option #' [`save_workflow = TRUE`][tune::control_grid] should have been used. +#' @param ... Not currently used, must be empty. #' @param metric A character string (or `NULL`) for which metric to optimize. If #' `NULL`, the first metric is used. #' @param parameters An optional 1-row tibble of tuning parameter settings, with @@ -22,7 +23,6 @@ #' `NULL`, the validation set is not used for resamples originating from #' [rsample::validation_set()] while it is used for resamples originating #' from [rsample::validation_split()]. -#' @param ... Not currently used. #' @inheritParams select_best #' @details #' This function is a shortcut for the manual steps of: @@ -88,15 +88,13 @@ fit_best.default <- function(x, ...) { #' @export #' @rdname fit_best fit_best.tune_results <- function(x, + ..., metric = NULL, eval_time = NULL, parameters = NULL, verbose = FALSE, - add_validation_set = NULL, - ...) { - if (length(list(...))) { - cli::cli_abort(c("x" = "The `...` are not used by this function.")) - } + add_validation_set = NULL) { + rlang::check_dots_empty() wflow <- .get_tune_workflow(x) if (is.null(wflow)) { cli::cli_abort(c("x" = "The control option `save_workflow = TRUE` should be used when tuning.")) diff --git a/R/load_ns.R b/R/load_ns.R index ec181a624..23d2cb738 100644 --- a/R/load_ns.R +++ b/R/load_ns.R @@ -8,17 +8,19 @@ #' @return An invisible NULL. #' @keywords internal #' @export -load_pkgs <- function(x, infra = TRUE, ...) { +load_pkgs <- function(x, ..., infra = TRUE) { UseMethod("load_pkgs") } #' @export load_pkgs.character <- function(x, ...) { + rlang::check_dots_empty() withr::with_preserve_seed(.load_namespace(x)) } #' @export -load_pkgs.model_spec <- function(x, infra = TRUE, ...) { +load_pkgs.model_spec <- function(x, ..., infra = TRUE) { + rlang::check_dots_empty() pkgs <- required_pkgs(x) if (infra) { pkgs <- c(infra_pkgs, pkgs) @@ -27,7 +29,8 @@ load_pkgs.model_spec <- function(x, infra = TRUE, ...) { } #' @export -load_pkgs.workflow <- function(x, infra = TRUE, ...) 
{ +load_pkgs.workflow <- function(x, ..., infra = TRUE) { + rlang::check_dots_empty() load_pkgs.model_spec(extract_spec_parsnip(x), infra = infra) } diff --git a/R/metric-selection.R b/R/metric-selection.R index 503cc4a65..09a54b297 100644 --- a/R/metric-selection.R +++ b/R/metric-selection.R @@ -68,7 +68,8 @@ check_mult_metrics <- function(metric, ..., call = rlang::caller_env()) { #' @rdname choose_metric #' @export -check_metric_in_tune_results <- function(mtr_info, metric, call = rlang::caller_env()) { +check_metric_in_tune_results <- function(mtr_info, metric, ..., call = rlang::caller_env()) { + rlang::check_dots_empty(call = call) if (!any(mtr_info$metric == metric)) { cli::cli_abort("{.val {metric}} was not in the metric set. Please choose from: {.val {mtr_info$metric}}.", call = call) @@ -97,8 +98,8 @@ contains_survival_metric <- function(mtr_info) { # choose_eval_time() is called by show_best(), select_best(), and augment() #' @rdname choose_metric #' @export -choose_eval_time <- function(x, metric, eval_time = NULL, quietly = FALSE, call = rlang::caller_env()) { - +choose_eval_time <- function(x, metric, ..., eval_time = NULL, quietly = FALSE, call = rlang::caller_env()) { + rlang::check_dots_empty(call = call) mtr_set <- .get_tune_metrics(x) mtr_info <- tibble::as_tibble(mtr_set) @@ -183,7 +184,7 @@ first_metric <- function(mtr_set) { # such as tune_bayes(). #' @rdname choose_metric #' @export -first_eval_time <- function(mtr_set, metric = NULL, eval_time = NULL, ..., quietly = FALSE, call = rlang::caller_env()) { +first_eval_time <- function(mtr_set, ..., metric = NULL, eval_time = NULL, quietly = FALSE, call = rlang::caller_env()) { rlang::check_dots_empty() num_times <- length(eval_time) @@ -253,7 +254,8 @@ first_eval_time <- function(mtr_set, metric = NULL, eval_time = NULL, ..., quiet #' @rdname choose_metric #' @export -check_metrics_arg <- function(mtr_set, wflow, call = rlang::caller_env()) { +check_metrics_arg <- function(mtr_set, wflow, ..., call = rlang::caller_env()) { + rlang::check_dots_empty(call = call) mode <- extract_spec_parsnip(wflow)$mode if (is.null(mtr_set)) { @@ -308,7 +310,8 @@ check_metrics_arg <- function(mtr_set, wflow, call = rlang::caller_env()) { #' @rdname choose_metric #' @export -check_eval_time_arg <- function(eval_time, mtr_set, call = rlang::caller_env()) { +check_eval_time_arg <- function(eval_time, mtr_set, ..., call = rlang::caller_env()) { + rlang::check_dots_empty(call = call) mtr_info <- tibble::as_tibble(mtr_set) # Not a survival metric diff --git a/R/select_best.R b/R/select_best.R index f39597a5c..b6e9bacd7 100644 --- a/R/select_best.R +++ b/R/select_best.R @@ -15,6 +15,12 @@ #' performance is within some acceptable limit. #' #' @param x The results of [tune_grid()] or [tune_bayes()]. +#' @param ... For [select_by_one_std_err()] and [select_by_pct_loss()], this +#' argument is passed directly to [dplyr::arrange()] so that the user can sort +#' the models from *most simple to most complex*. That is, for a parameter `p`, +#' pass the unquoted expression `p` if smaller values of `p` indicate a simpler +#' model, or `desc(p)` if larger values indicate a simpler model. At +#' least one term is required for these two functions. See the examples below. #' @param metric A character value for the metric that will be used to sort #' the models. (See #' \url{https://yardstick.tidymodels.org/articles/metric-types.html} for @@ -24,12 +30,6 @@ #' @param n An integer for the number of top results/rows to return. 
#' @param limit The limit of loss of performance that is acceptable (in percent #' units). See details below. -#' @param ... For [select_by_one_std_err()] and [select_by_pct_loss()], this -#' argument is passed directly to [dplyr::arrange()] so that the user can sort -#' the models from *most simple to most complex*. That is, for a parameter `p`, -#' pass the unquoted expression `p` if smaller values of `p` indicate a simpler -#' model, or `desc(p)` if larger values indicate a simpler model. At -#' least one term is required for these two functions. See the examples below. #' @param eval_time A single numeric time point where dynamic event time #' metrics should be chosen (e.g., the time-dependent ROC curve, etc). The #' values should be consistent with the values used to create `x`. The `NULL` @@ -78,10 +78,10 @@ show_best.default <- function(x, ...) { #' @export #' @rdname show_best show_best.tune_results <- function(x, + ..., metric = NULL, eval_time = NULL, n = 5, - ..., call = rlang::current_env()) { rlang::check_dots_empty() @@ -119,7 +119,7 @@ select_best.default <- function(x, ...) { #' @export #' @rdname show_best -select_best.tune_results <- function(x, metric = NULL, eval_time = NULL, ...) { +select_best.tune_results <- function(x, ..., metric = NULL, eval_time = NULL) { rlang::check_dots_empty() metric_info <- choose_metric(x, metric) diff --git a/R/tune_bayes.R b/R/tune_bayes.R index 72648ee3d..e02669a35 100644 --- a/R/tune_bayes.R +++ b/R/tune_bayes.R @@ -296,7 +296,7 @@ tune_bayes_workflow <- function(object, maximize <- opt_metric$direction == "maximize" eval_time <- check_eval_time_arg(eval_time, metrics, call = call) - opt_metric_time <- first_eval_time(metrics, opt_metric_name, eval_time, call = call) + opt_metric_time <- first_eval_time(metrics, metric = opt_metric_name, eval_time = eval_time, call = call) if (is.null(param_info)) { param_info <- hardhat::extract_parameter_set_dials(object) @@ -547,7 +547,8 @@ check_iter <- function(iter, call) { #' @rdname empty_ellipses #' @param pset A `parameters` object. #' @param as_matrix A logical for the return type. -encode_set <- function(x, pset, as_matrix = FALSE, ...) { +encode_set <- function(x, pset, ..., as_matrix = FALSE) { + rlang::check_dots_empty() # change the numeric variables to the transformed scale (if any) has_trans <- purrr::map_lgl(pset$object, ~ !is.null(.x$trans)) if (any(has_trans)) { diff --git a/man/augment.tune_results.Rd b/man/augment.tune_results.Rd index a3e3b58e2..0b625e05a 100644 --- a/man/augment.tune_results.Rd +++ b/man/augment.tune_results.Rd @@ -6,7 +6,7 @@ \alias{augment.last_fit} \title{Augment data with holdout predictions} \usage{ -\method{augment}{tune_results}(x, parameters = NULL, ...) +\method{augment}{tune_results}(x, ..., parameters = NULL) \method{augment}{resample_results}(x, ...) @@ -17,13 +17,13 @@ \code{fit_resamples()}, or \code{last_fit()}. The control specifications for these objects should have used the option \code{save_pred = TRUE}.} +\item{...}{Not currently used.} + \item{parameters}{A data frame with a single row that indicates what tuning parameters should be used to generate the predictions (for \verb{tune_*()} objects only). 
If \code{NULL}, \code{select_best(x)} will be used with the first metric and, if applicable, the first evaluation time point, used to create \code{x}.} - -\item{...}{Not currently used.} } \value{ A data frame with one or more additional columns for model diff --git a/man/choose_metric.Rd b/man/choose_metric.Rd index 37cb3f9ca..da56caeb6 100644 --- a/man/choose_metric.Rd +++ b/man/choose_metric.Rd @@ -14,11 +14,12 @@ \usage{ choose_metric(x, metric, ..., call = rlang::caller_env()) -check_metric_in_tune_results(mtr_info, metric, call = rlang::caller_env()) +check_metric_in_tune_results(mtr_info, metric, ..., call = rlang::caller_env()) choose_eval_time( x, metric, + ..., eval_time = NULL, quietly = FALSE, call = rlang::caller_env() @@ -30,18 +31,18 @@ first_metric(mtr_set) first_eval_time( mtr_set, + ..., metric = NULL, eval_time = NULL, - ..., quietly = FALSE, call = rlang::caller_env() ) .filter_perf_metrics(x, metric, eval_time) -check_metrics_arg(mtr_set, wflow, call = rlang::caller_env()) +check_metrics_arg(mtr_set, wflow, ..., call = rlang::caller_env()) -check_eval_time_arg(eval_time, mtr_set, call = rlang::caller_env()) +check_eval_time_arg(eval_time, mtr_set, ..., call = rlang::caller_env()) } \arguments{ \item{x}{An object with class \code{tune_results}.} diff --git a/man/collect_predictions.Rd b/man/collect_predictions.Rd index 1fbe5b12e..c1c59257b 100644 --- a/man/collect_predictions.Rd +++ b/man/collect_predictions.Rd @@ -16,11 +16,11 @@ collect_predictions(x, ...) \method{collect_predictions}{default}(x, ...) -\method{collect_predictions}{tune_results}(x, summarize = FALSE, parameters = NULL, ...) +\method{collect_predictions}{tune_results}(x, ..., summarize = FALSE, parameters = NULL) collect_metrics(x, ...) -\method{collect_metrics}{tune_results}(x, summarize = TRUE, type = c("long", "wide"), ...) +\method{collect_metrics}{tune_results}(x, ..., summarize = TRUE, type = c("long", "wide")) collect_notes(x, ...) @@ -161,7 +161,11 @@ resampled <- collect_predictions(resampled) \%>\% arrange(.row) collect_predictions(resampled, summarize = TRUE) \%>\% arrange(.row) -collect_predictions(resampled, summarize = TRUE, grid[1, ]) \%>\% arrange(.row) +collect_predictions( + resampled, + summarize = TRUE, + parameters = grid[1, ] +) \%>\% arrange(.row) collect_extracts(resampled) \dontshow{\}) # examplesIf} diff --git a/man/compute_metrics.Rd b/man/compute_metrics.Rd index 25e77ade2..cf286e67c 100644 --- a/man/compute_metrics.Rd +++ b/man/compute_metrics.Rd @@ -10,7 +10,7 @@ compute_metrics(x, metrics, summarize, event_level, ...) \method{compute_metrics}{default}(x, metrics, summarize = TRUE, event_level = "first", ...) -\method{compute_metrics}{tune_results}(x, metrics, summarize = TRUE, event_level = "first", ...) 
+\method{compute_metrics}{tune_results}(x, metrics, ..., summarize = TRUE, event_level = "first") } \arguments{ \item{x}{The results of a tuning function like \code{\link[=tune_grid]{tune_grid()}} or diff --git a/man/conf_mat_resampled.Rd b/man/conf_mat_resampled.Rd index 875ca66ec..da8c1f8e5 100644 --- a/man/conf_mat_resampled.Rd +++ b/man/conf_mat_resampled.Rd @@ -4,12 +4,14 @@ \alias{conf_mat_resampled} \title{Compute average confusion matrix across resamples} \usage{ -conf_mat_resampled(x, parameters = NULL, tidy = TRUE) +conf_mat_resampled(x, ..., parameters = NULL, tidy = TRUE) } \arguments{ \item{x}{An object with class \code{tune_results} that was used with a classification model that was run with \code{control_*(save_pred = TRUE)}.} +\item{...}{Currently unused, must be empty.} + \item{parameters}{A tibble with a single tuning parameter combination. Only one tuning parameter combination (if any were used) is allowed here.} diff --git a/man/empty_ellipses.Rd b/man/empty_ellipses.Rd index 75b5e29a6..fcbdb54f4 100644 --- a/man/empty_ellipses.Rd +++ b/man/empty_ellipses.Rd @@ -27,7 +27,7 @@ check_rset(x) check_parameters(wflow, pset = NULL, data, grid_names = character(0)) -check_workflow(x, pset = NULL, check_dials = FALSE, call = caller_env()) +check_workflow(x, ..., pset = NULL, check_dials = FALSE, call = caller_env()) check_metrics(x, object) @@ -48,7 +48,7 @@ val_class_and_single(x, cls = "numeric", where = NULL) .config_key_from_metrics(x) -estimate_tune_results(x, col_name = ".metrics", ...) +estimate_tune_results(x, ..., col_name = ".metrics") metrics_info(x) @@ -65,7 +65,7 @@ new_iteration_results( get_tune_colors() -encode_set(x, pset, as_matrix = FALSE, ...) +encode_set(x, pset, ..., as_matrix = FALSE) check_time(origin, limit) @@ -90,6 +90,8 @@ is_workflow(x) \item{grid_names}{A character vector of column names from the grid.} +\item{...}{Other options} + \item{check_dials}{A logical for check for a NULL parameter object.} \item{object}{A \code{workflow} object.} @@ -107,8 +109,6 @@ metrics should be computed (e.g. the time-dependent ROC curve, etc).} \item{where}{A character string for the calling function.} -\item{...}{Other options} - \item{parameters}{A \code{parameters} object.} \item{outcomes}{A character vector of outcome names.} diff --git a/man/fit_best.Rd b/man/fit_best.Rd index de5cddad6..6afc7ec8e 100644 --- a/man/fit_best.Rd +++ b/man/fit_best.Rd @@ -12,12 +12,12 @@ fit_best(x, ...) \method{fit_best}{tune_results}( x, + ..., metric = NULL, eval_time = NULL, parameters = NULL, verbose = FALSE, - add_validation_set = NULL, - ... + add_validation_set = NULL ) } \arguments{ @@ -25,7 +25,7 @@ fit_best(x, ...) \code{\link[=tune_grid]{tune_grid()}}, \code{\link[=tune_bayes]{tune_bayes()}}, etc). The control option \code{\link[=control_grid]{save_workflow = TRUE}} should have been used.} -\item{...}{Not currently used.} +\item{...}{Not currently used, must be empty.} \item{metric}{A character string (or \code{NULL}) for which metric to optimize. If \code{NULL}, the first metric is used.} diff --git a/man/load_pkgs.Rd b/man/load_pkgs.Rd index f376332e9..6892c4b77 100644 --- a/man/load_pkgs.Rd +++ b/man/load_pkgs.Rd @@ -4,7 +4,7 @@ \alias{load_pkgs} \title{Quietly load package namespace} \usage{ -load_pkgs(x, infra = TRUE, ...) 
+load_pkgs(x, ..., infra = TRUE) } \arguments{ \item{x}{A character vector of packages.} diff --git a/man/show_best.Rd b/man/show_best.Rd index 638a18532..649e8bcdf 100644 --- a/man/show_best.Rd +++ b/man/show_best.Rd @@ -21,10 +21,10 @@ show_best(x, ...) \method{show_best}{tune_results}( x, + ..., metric = NULL, eval_time = NULL, n = 5, - ..., call = rlang::current_env() ) @@ -32,7 +32,7 @@ select_best(x, ...) \method{select_best}{default}(x, ...) -\method{select_best}{tune_results}(x, metric = NULL, eval_time = NULL, ...) +\method{select_best}{tune_results}(x, ..., metric = NULL, eval_time = NULL) select_by_pct_loss(x, ...) diff --git a/tests/testthat/_snaps/augment.md b/tests/testthat/_snaps/augment.md index d0d0c81cd..36bf4eb22 100644 --- a/tests/testthat/_snaps/augment.md +++ b/tests/testthat/_snaps/augment.md @@ -4,7 +4,9 @@ augment(fit_1, hey = "you") Condition Error in `augment()`: - ! The only argument for `augment.fit_resamples()` is 'x'. Others were passed: 'hey' + ! `...` must be empty. + x Problematic argument: + * hey = "you" --- @@ -36,7 +38,9 @@ augment(fit_1, cost = 4) Condition Error in `augment()`: - ! The only two arguments for `augment.tune_results()` are 'x' and 'parameters'. Others were passed: 'cost' + ! `...` must be empty. + x Problematic argument: + * cost = 4 # augment last_fit @@ -44,5 +48,7 @@ augment(fit_1, potato = TRUE) Condition Error in `augment()`: - ! The only argument for `augment.last_fit()` is 'x'. Others were passed: 'potato' + ! `...` must be empty. + x Problematic argument: + * potato = TRUE diff --git a/tests/testthat/_snaps/collect.md b/tests/testthat/_snaps/collect.md index e3533aeef..67af0b6c2 100644 --- a/tests/testthat/_snaps/collect.md +++ b/tests/testthat/_snaps/collect.md @@ -22,58 +22,6 @@ Error in `filter_predictions()`: ! `parameters` should only have columns: 'cost value' -# collecting notes - fit_resamples - - Code - lm_splines <- fit_resamples(lin_mod, mpg ~ ., flds) - Message - ! Bootstrap1: preprocessor 1/1, model 1/1 (predictions): prediction from a rank-deficient fit may be misleading - ! Bootstrap2: preprocessor 1/1, model 1/1 (predictions): prediction from a rank-deficient fit may be misleading - ---- - - Code - lm_splines - Output - # Resampling results - # Bootstrap sampling - # A tibble: 2 x 4 - splits id .metrics .notes - - 1 Bootstrap1 - 2 Bootstrap2 - - There were issues with some computations: - - - Warning(s) x2: prediction from a rank-deficient fit may be misleading - - Run `show_notes(.Last.tune.result)` for more information. - -# collecting notes - last_fit - - Code - lst <- last_fit(lin_mod, mpg ~ ., split) - Message - ! train/test split: preprocessor 1/1, model 1/1 (predictions): prediction from a rank-deficient fit may be misleading - ---- - - Code - lst - Output - # Resampling results - # Manual resampling - # A tibble: 1 x 6 - splits id .metrics .notes .predictions .workflow - - 1 train/test split - - There were issues with some computations: - - - Warning(s) x1: prediction from a rank-deficient fit may be misleading - - Run `show_notes(.Last.tune.result)` for more information. 
- # `collect_notes()` errors informatively applied to unsupported class Code diff --git a/tests/testthat/_snaps/conf-mat-resampled.md b/tests/testthat/_snaps/conf-mat-resampled.md index 774d31c12..1ac63c0e9 100644 --- a/tests/testthat/_snaps/conf-mat-resampled.md +++ b/tests/testthat/_snaps/conf-mat-resampled.md @@ -41,7 +41,8 @@ --- Code - conf_mat_resampled(broke_results, select_best(broke_results, "accuracy")) + conf_mat_resampled(broke_results, parameters = select_best(broke_results, + metric = "accuracy")) Condition Error in `conf_mat_resampled()`: ! Cannot determine the proper outcome name @@ -49,8 +50,10 @@ --- Code - conf_mat_resampled(svm_results) + conf_mat_resampled(svm_results, argument_that_doesnt_exist = TRUE) Condition Error in `conf_mat_resampled()`: - ! It looks like there are 5 tuning parameter combination(s) in the data. Please use the `parameters` argument to select one combination of parameters. + ! `...` must be empty. + x Problematic argument: + * argument_that_doesnt_exist = TRUE diff --git a/tests/testthat/_snaps/eval-time-single-selection.md b/tests/testthat/_snaps/eval-time-single-selection.md index e4aa314c4..e6aee1881 100644 --- a/tests/testthat/_snaps/eval-time-single-selection.md +++ b/tests/testthat/_snaps/eval-time-single-selection.md @@ -9,7 +9,7 @@ --- Code - first_eval_time(met_dyn, "brier_survival", eval_time = NULL) + first_eval_time(met_dyn, metric = "brier_survival", eval_time = NULL) Condition Error: ! A single evaluation time is required to use this metric. @@ -84,7 +84,7 @@ --- Code - choose_eval_time(ames_grid_search, "rmse", 1) + choose_eval_time(ames_grid_search, "rmse", eval_time = 1) Condition Warning: `eval_time` is only used for models with mode "censored regression". diff --git a/tests/testthat/_snaps/fit_best.md b/tests/testthat/_snaps/fit_best.md index f9f072bf2..3b86c704b 100644 --- a/tests/testthat/_snaps/fit_best.md +++ b/tests/testthat/_snaps/fit_best.md @@ -62,7 +62,9 @@ --- - x The `...` are not used by this function. + `...` must be empty. 
+ x Problematic argument: + * chickens = 2 --- diff --git a/tests/testthat/test-collect.R b/tests/testthat/test-collect.R index 959d4874f..9d5f6ec36 100644 --- a/tests/testthat/test-collect.R +++ b/tests/testthat/test-collect.R @@ -47,7 +47,7 @@ svm_tune_class$.predictions <- ) attr(svm_tune_class, "metrics") <- yardstick::metric_set(yardstick::kap) -svm_grd <- show_best(svm_tune, "roc_auc") %>% dplyr::select(`cost value`) +svm_grd <- show_best(svm_tune, metric = "roc_auc") %>% dplyr::select(`cost value`) # ------------------------------------------------------------------------------ diff --git a/tests/testthat/test-conf-mat-resampled.R b/tests/testthat/test-conf-mat-resampled.R index 75602a1f6..a2c04e2ab 100644 --- a/tests/testthat/test-conf-mat-resampled.R +++ b/tests/testthat/test-conf-mat-resampled.R @@ -2,13 +2,13 @@ test_that("appropriate return values", { svm_results <- readRDS(test_path("data", "svm_results.rds")) expect_error( - cm_1 <- conf_mat_resampled(svm_results, select_best(svm_results, "accuracy")), + cm_1 <- conf_mat_resampled(svm_results, parameters = select_best(svm_results, metric = "accuracy")), regex = NA ) expect_true(tibble::is_tibble(cm_1)) expect_error( - cm_2 <- conf_mat_resampled(svm_results, select_best(svm_results, "accuracy"), tidy = FALSE), + cm_2 <- conf_mat_resampled(svm_results, parameters = select_best(svm_results, metric = "accuracy"), tidy = FALSE), regex = NA ) expect_equal(class(cm_2), "conf_mat") @@ -60,10 +60,10 @@ test_that("bad argss", { attr(broke_results, "outcomes") <- NULL expect_snapshot(error = TRUE, { - conf_mat_resampled(broke_results, select_best(broke_results, "accuracy")) + conf_mat_resampled(broke_results, parameters = select_best(broke_results, metric = "accuracy")) }) expect_snapshot(error = TRUE, { - conf_mat_resampled(svm_results) + conf_mat_resampled(svm_results, argument_that_doesnt_exist = TRUE) }) }) diff --git a/tests/testthat/test-eval-time-single-selection.R b/tests/testthat/test-eval-time-single-selection.R index c31e23818..2add0490a 100644 --- a/tests/testthat/test-eval-time-single-selection.R +++ b/tests/testthat/test-eval-time-single-selection.R @@ -25,7 +25,7 @@ test_that("selecting single eval time - pure metric sets", { # all static; return NULL and add warning if times are given expect_null(first_eval_time(met_stc, eval_time = NULL)) - expect_null(first_eval_time(met_stc, "concordance_survival", eval_time = NULL)) + expect_null(first_eval_time(met_stc, metric = "concordance_survival", eval_time = NULL)) expect_silent( stc_one <- first_eval_time(met_stc, eval_time = times_1) @@ -45,7 +45,7 @@ test_that("selecting single eval time - pure metric sets", { error = TRUE ) expect_snapshot( - first_eval_time(met_dyn, "brier_survival", eval_time = NULL), + first_eval_time(met_dyn, metric = "brier_survival", eval_time = NULL), error = TRUE ) @@ -63,7 +63,7 @@ test_that("selecting single eval time - pure metric sets", { expect_null(first_eval_time(met_int, eval_time = NULL)) expect_null( - first_eval_time(met_int, "brier_survival_integrated", eval_time = NULL) + first_eval_time(met_int, metric = "brier_survival_integrated", eval_time = NULL) ) expect_silent( @@ -219,5 +219,5 @@ test_that("selecting an evaluation time", { ) data("example_ames_knn") - expect_snapshot(choose_eval_time(ames_grid_search, "rmse", 1)) + expect_snapshot(choose_eval_time(ames_grid_search, "rmse", eval_time = 1)) })
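For reference, a minimal sketch of the dots-checking pattern this patch standardizes on; the function name and arguments below are hypothetical placeholders, not part of tune:

```r
library(rlang)

# `...` sits immediately after the required argument, so everything that
# follows must be passed by name; check_dots_empty() errors otherwise
summarize_results <- function(x, ..., metric = NULL, n = 5) {
  rlang::check_dots_empty()
  list(metric = metric, n = n)
}

summarize_results(mtcars, metric = "rmse") # ok: optional arguments are named
try(summarize_results(mtcars, "rmse", 3))  # errors: `...` must be empty
```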