[R-package] remove uses of testthat::context() in tests (#4915)
jameslamb authored Dec 30, 2021
1 parent ab78829 · commit a55ff18
Showing 16 changed files with 0 additions and 64 deletions.
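For readers following along: the commit message does not spell out the motivation, but it matches testthat's own guidance, where context() is deprecated in favour of using each test file's name as the reporting context, so the calls can be deleted with no replacement. A minimal sketch of the pattern applied in every file below, using a hypothetical test_example.R that is not part of this commit:

library(testthat)  # so the sketch runs standalone; the package test runner normally loads this

# Before: the file opened with an explicit context() call, e.g.
#   context("example feature")
# After: the call is simply dropped. The file name ("test_example.R")
# serves as the context, and the tests themselves are unchanged.
test_that("example feature works", {
  expect_equal(1L + 1L, 2L)
})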
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_Predictor.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("Predictor")

test_that("Predictor$finalize() should not fail", {
X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
y <- iris[["Sepal.Length"]]
15 changes: 0 additions & 15 deletions R-package/tests/testthat/test_basic.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("lightgbm()")

ON_WINDOWS <- .Platform$OS.type == "windows"

UTF8_LOCALE <- all(grepl(
@@ -326,9 +324,6 @@ test_that("lightgbm() does not write model to disk if save_name=NULL", {
expect_equal(files_before, files_after)
})


context("training continuation")

test_that("training continuation works", {
dtrain <- lgb.Dataset(
train$data
@@ -360,8 +355,6 @@ test_that("training continuation works", {
expect_lt(abs(err_bst - err_bst2), 0.01)
})

context("lgb.cv()")

test_that("cv works", {
dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(
@@ -593,8 +586,6 @@ test_that("lgb.cv() respects eval_train_metric argument", {
)
})

context("lgb.train()")

test_that("lgb.train() works as expected with multiple eval metrics", {
metrics <- c("binary_error", "auc", "binary_logloss")
bst <- lgb.train(
@@ -2139,8 +2130,6 @@ test_that("lgb.cv() updates params based on keyword arguments", {

})

context("linear learner")

test_that("lgb.train() fit on linearly-relatead data improves when using linear learners", {
set.seed(708L)
.new_dataset <- function() {
@@ -2380,8 +2369,6 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f
expect_true(bst_lin_last_mse < bst_last_mse)
})

context("interaction constraints")

test_that("lgb.train() throws an informative error if interaction_constraints is not a list", {
dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(objective = "regression", interaction_constraints = "[1,2],[3]")
@@ -2482,8 +2469,6 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai

})

context("monotone constraints")

.generate_trainset_for_monotone_constraints_tests <- function(x3_to_categorical) {
n_samples <- 3000L
x1_positively_correlated_with_y <- runif(n = n_samples, min = 0.0, max = 1.0)
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_custom_objective.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("Test models with custom objective")

data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label)
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_dataset.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("testing lgb.Dataset functionality")

data(agaricus.train, package = "lightgbm")
train_data <- agaricus.train$data[seq_len(1000L), ]
train_label <- agaricus.train$label[seq_len(1000L)]
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_learning_to_rank.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("Learning to rank")

# numerical tolerance to use when checking metric values
TOLERANCE <- 1e-06

14 changes: 0 additions & 14 deletions R-package/tests/testthat/test_lgb.Booster.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("Booster")

ON_WINDOWS <- .Platform$OS.type == "windows"
TOLERANCE <- 1e-6

@@ -31,8 +29,6 @@ test_that("Booster$finalize() should not fail", {
expect_true(lgb.is.null.handle(bst$.__enclos_env__$private$handle))
})

context("lgb.get.eval.result")

test_that("lgb.get.eval.result() should throw an informative error if booster is not an lgb.Booster", {
bad_inputs <- list(
matrix(1.0:10.0, 2L, 5L)
@@ -124,8 +120,6 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
}, regexp = "Only the following eval_names exist for dataset.*\\: \\[l2\\]", fixed = FALSE)
})

context("lgb.load()")

test_that("lgb.load() gives the expected error messages given different incorrect inputs", {
set.seed(708L)
data(agaricus.train, package = "lightgbm")
@@ -394,8 +388,6 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
expect_identical(pred, pred2)
})

context("Booster")

test_that("Creating a Booster from a Dataset should work", {
set.seed(708L)
data(agaricus.train, package = "lightgbm")
@@ -692,8 +684,6 @@ test_that("Booster$params should include dataset params, before and after Booste
expect_identical(bst$params, expected_params)
})

context("save_model")

test_that("Saving a model with different feature importance types works", {
set.seed(708L)
data(agaricus.train, package = "lightgbm")
@@ -1032,8 +1022,6 @@ test_that("lgb.cv() correctly handles passing through params to the model file",

})

context("saveRDS.lgb.Booster() and readRDS.lgb.Booster()")

test_that("params (including dataset params) should be stored in .rds file for Booster", {
data(agaricus.train, package = "lightgbm")
dtrain <- lgb.Dataset(
@@ -1069,8 +1057,6 @@ test_that("params (including dataset params) should be stored in .rds file for B
)
})

context("saveRDS and readRDS work on Booster")

test_that("params (including dataset params) should be stored in .rds file for Booster", {
data(agaricus.train, package = "lightgbm")
dtrain <- lgb.Dataset(
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.convert_with_rules.R
@@ -1,5 +1,3 @@
context("lgb.convert_with_rules()")

test_that("lgb.convert_with_rules() rejects inputs that are not a data.table or data.frame", {
bad_inputs <- list(
matrix(1.0:10.0, 2L, 5L)
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.importance.R
@@ -1,5 +1,3 @@
context("lgb.importance")

test_that("lgb.importance() should reject bad inputs", {
bad_inputs <- list(
.Machine$integer.max
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.interprete.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("lgb.interpete")

.sigmoid <- function(x) {
1.0 / (1.0 + exp(-x))
}
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.plot.importance.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("lgb.plot.importance()")

test_that("lgb.plot.importance() should run without error for well-formed inputs", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.plot.interpretation.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("lgb.plot.interpretation")

.sigmoid <- function(x) {
1.0 / (1.0 + exp(-x))
}
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_lgb.unloader.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("lgb.unloader")

test_that("lgb.unloader works as expected", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_metrics.R
@@ -1,5 +1,3 @@
context(".METRICS_HIGHER_BETTER()")

test_that(".METRICS_HIGHER_BETTER() should be well formed", {
metrics <- .METRICS_HIGHER_BETTER()
metric_names <- names(.METRICS_HIGHER_BETTER())
5 changes: 0 additions & 5 deletions R-package/tests/testthat/test_parameters.R
@@ -1,6 +1,3 @@

context("feature penalties")

data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
train <- agaricus.train
@@ -47,8 +44,6 @@ test_that("Feature penalties work properly", {
expect_length(var_gain[[length(var_gain)]], 0L)
})

context("parameter aliases")

test_that(".PARAMETER_ALIASES() returns a named list of character vectors, where names are unique", {
param_aliases <- .PARAMETER_ALIASES()
expect_identical(class(param_aliases), "list")
6 changes: 0 additions & 6 deletions R-package/tests/testthat/test_utils.R
@@ -1,5 +1,3 @@
context("lgb.params2str")

test_that("lgb.params2str() works as expected for empty lists", {
out_str <- lgb.params2str(
params = list()
@@ -37,8 +35,6 @@ test_that("lgb.params2str() passes through duplicated params", {
expect_equal(out_str, "objective=regression bagging_fraction=0.8 bagging_fraction=0.5")
})

context("lgb.check.eval")

test_that("lgb.check.eval works as expected with no metric", {
params <- lgb.check.eval(
params = list(device = "cpu")
@@ -84,8 +80,6 @@ test_that("lgb.check.eval drops duplicate metrics and preserves order", {
expect_identical(params[["metric"]], list("l1", "l2", "rmse"))
})

context("lgb.check.wrapper_param")

test_that("lgb.check.wrapper_param() uses passed-in keyword arg if no alias found in params", {
kwarg_val <- sample(seq_len(100L), size = 1L)
params <- lgb.check.wrapper_param(
2 changes: 0 additions & 2 deletions R-package/tests/testthat/test_weighted_loss.R
@@ -2,8 +2,6 @@ VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

context("Case weights are respected")

test_that("Gamma regression reacts on 'weight'", {
n <- 100L
set.seed(87L)
