@@ -960,17 +960,17 @@ xgb.cb.cv.predict <- function(save_models = FALSE, outputmargin = FALSE) {
 #'   booster = "gblinear",
 #'   objective = "reg:logistic",
 #'   eval_metric = "auc",
-#'   lambda = 0.0003,
-#'   alpha = 0.0003,
+#'   reg_lambda = 0.0003,
+#'   reg_alpha = 0.0003,
 #'   nthread = nthread
 #' )
 #'
-#' # For 'shotgun', which is a default linear updater, using high eta values may result in
+#' # For 'shotgun', the default linear updater, using high learning_rate values may result in
 #' # unstable behaviour in some datasets. With this simple dataset, however, the high learning
 #' # rate does not break the convergence, but allows us to illustrate the typical pattern of
 #' # "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
 #' bst <- xgb.train(
-#'   c(param, list(eta = 1.)),
+#'   c(param, list(learning_rate = 1.)),
 #'   dtrain,
 #'   evals = list(tr = dtrain),
 #'   nrounds = 200,
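
For reference, the renamed parameters from this hunk drop into a self-contained run roughly as below. This is a minimal sketch, not part of the commit: it assumes the agaricus data bundled with xgboost, a package version that accepts the learning_rate/reg_lambda/reg_alpha spellings, and that the truncated xgb.train() call above registers xgb.cb.gblinear.history() so the coefficient history can be plotted (nthread = 2 stands in for the example's nthread variable, which is defined above the hunk).

    library(xgboost)
    data(agaricus.train, package = "xgboost")
    dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
    param <- list(
      booster = "gblinear",
      objective = "reg:logistic",
      eval_metric = "auc",
      reg_lambda = 0.0003,  # formerly 'lambda'
      reg_alpha = 0.0003,   # formerly 'alpha'
      nthread = 2           # placeholder for the example's 'nthread' variable
    )
    bst <- xgb.train(
      c(param, list(learning_rate = 1.0)),  # formerly 'eta'
      dtrain,
      evals = list(tr = dtrain),
      nrounds = 200,
      verbose = 0,
      callbacks = list(xgb.cb.gblinear.history())  # assumed callback, per this file's naming
    )
    matplot(xgb.gblinear.history(bst), type = "l")  # one coefficient path per feature
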
@@ -987,7 +987,7 @@ xgb.cb.cv.predict <- function(save_models = FALSE, outputmargin = FALSE) {
 #'   c(
 #'     param,
 #'     xgb.params(
-#'       eta = 0.8,
+#'       learning_rate = 0.8,
 #'       updater = "coord_descent",
 #'       feature_selector = "thrifty",
 #'       top_k = 1
@@ -1000,12 +1000,20 @@ xgb.cb.cv.predict <- function(save_models = FALSE, outputmargin = FALSE) {
 #' )
 #' matplot(xgb.gblinear.history(bst), type = "l")
 #' # Componentwise boosting is known to have similar effect to Lasso regularization.
-#' # Try experimenting with various values of top_k, eta , nrounds,
+#' # Try experimenting with various values of top_k, learning_rate, nrounds,
 #' # as well as different feature_selectors.
 #'
 #' # For xgb.cv:
 #' bst <- xgb.cv(
-#'   c(param, list(eta = 0.8)),
+#'   c(
+#'     param,
+#'     xgb.params(
+#'       learning_rate = 0.8,
+#'       updater = "coord_descent",
+#'       feature_selector = "thrifty",
+#'       top_k = 1
+#'     )
+#'   ),
 #'   dtrain,
 #'   nfold = 5,
 #'   nrounds = 100,
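
Since the hunk's comment points at the Lasso-like behaviour of componentwise boosting (the thrifty selector with top_k = 1 updates essentially one coefficient per step), here is a sketch of inspecting the resulting paths after the xgb.cv call above. It assumes that, for a CV result, xgb.gblinear.history() returns one coefficient matrix per fold, and that the full call registers the history callback:

    # Assumed: a list of per-fold coefficient matrices for a CV result
    coef_paths <- xgb.gblinear.history(bst)
    matplot(coef_paths[[1]], type = "l")  # coefficient paths, first fold
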
@@ -1022,15 +1030,15 @@ xgb.cb.cv.predict <- function(save_models = FALSE, outputmargin = FALSE) {
 #'   booster = "gblinear",
 #'   objective = "multi:softprob",
 #'   num_class = 3,
-#'   lambda = 0.0003,
-#'   alpha = 0.0003,
+#'   reg_lambda = 0.0003,
+#'   reg_alpha = 0.0003,
 #'   nthread = nthread
 #' )
 #'
 #' # For the default linear updater 'shotgun' it sometimes is helpful
-#' # to use smaller eta to reduce instability
+#' # to use a smaller learning_rate to reduce instability
 #' bst <- xgb.train(
-#'   c(param, list(eta = 0.5)),
+#'   c(param, list(learning_rate = 0.5)),
 #'   dtrain,
 #'   evals = list(tr = dtrain),
 #'   nrounds = 50,
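
With multi:softprob the gblinear booster keeps one coefficient set per class. A sketch of looking at them one class at a time, assuming xgb.gblinear.history() takes a zero-based class_index argument for multiclass models and that the call above registered the history callback:

    # class_index is assumed zero-based; coefficient paths for class 0
    matplot(xgb.gblinear.history(bst, class_index = 0), type = "l")
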
@@ -1044,7 +1052,7 @@ xgb.cb.cv.predict <- function(save_models = FALSE, outputmargin = FALSE) {
 #'
 #' # CV:
 #' bst <- xgb.cv(
-#'   c(param, list(eta = 0.5)),
+#'   c(param, list(learning_rate = 0.5)),
 #'   dtrain,
 #'   nfold = 5,
 #'   nrounds = 70,
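
Across all five hunks the commit applies one consistent rename of legacy parameter spellings. As a compact reference, the mapping used in these examples, expressed as an R named vector:

    # Legacy name -> new name, as swapped throughout this commit's examples
    c(eta = "learning_rate", lambda = "reg_lambda", alpha = "reg_alpha")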