diff --git a/python-package/lightgbm/engine.py b/python-package/lightgbm/engine.py
index 646c1ec8505d..2bca6cbdb97b 100644
--- a/python-package/lightgbm/engine.py
+++ b/python-package/lightgbm/engine.py
@@ -178,7 +178,7 @@ def train(
         params["num_iterations"] = num_boost_round
     # show deprecation warning only for early stop argument, setting early stop via global params should still be possible
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
-        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                      "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
     for alias in _ConfigAliases.get("early_stopping_round"):
         if alias in params:
@@ -237,9 +237,9 @@ def train(
 
     # Most of legacy advanced options becomes callbacks
     if verbose_eval != "warn":
-        _log_warning("'verbose_eval' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
                      "Pass 'print_evaluation()' callback via 'callbacks' argument instead.")
-    if verbose_eval == "warn":
+    else:
         if callbacks:  # assume user has already specified print_evaluation callback
             verbose_eval = False
         else:
@@ -253,12 +253,12 @@ def train(
         callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))
 
     if learning_rates is not None:
-        _log_warning("'learning_rates' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'learning_rates' argument is deprecated and will be removed in a future release of LightGBM. "
                      "Pass 'reset_parameter()' callback via 'callbacks' argument instead.")
        callbacks.add(callback.reset_parameter(learning_rate=learning_rates))
 
     if evals_result is not None:
-        _log_warning("'evals_result' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
                      "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
         callbacks.add(callback.record_evaluation(evals_result))
 
@@ -575,7 +575,7 @@ def cv(params, train_set, num_boost_round=100,
             num_boost_round = params.pop(alias)
     params["num_iterations"] = num_boost_round
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
-        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                      "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
     for alias in _ConfigAliases.get("early_stopping_round"):
         if alias in params:
@@ -618,7 +618,7 @@ def cv(params, train_set, num_boost_round=100,
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
         callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False))
     if verbose_eval is not None:
-        _log_warning("'verbose_eval' argument is deprecated and will be removed in 4.0.0 release. "
+        _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
" "Pass 'print_evaluation()' callback via 'callbacks' argument instead.") if verbose_eval is True: callbacks.add(callback.print_evaluation(show_stdv=show_stdv)) diff --git a/python-package/lightgbm/sklearn.py b/python-package/lightgbm/sklearn.py index 332f971b4c3d..ba1c447a16f7 100644 --- a/python-package/lightgbm/sklearn.py +++ b/python-package/lightgbm/sklearn.py @@ -711,7 +711,7 @@ def _get_meta_data(collection, name, i): init_model = init_model.booster_ if early_stopping_rounds is not None and early_stopping_rounds > 0: - _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in 4.0.0 release. " + _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'early_stopping()' callback via 'callbacks' argument instead.") params['early_stopping_rounds'] = early_stopping_rounds @@ -721,9 +721,9 @@ def _get_meta_data(collection, name, i): callbacks = copy.deepcopy(callbacks) if verbose != 'warn': - _log_warning("'verbose' argument is deprecated and will be removed in 4.0.0 release. " + _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'print_evaluation()' callback via 'callbacks' argument instead.") - if verbose == 'warn': + else: if callbacks: # assume user has already specified print_evaluation callback verbose = False else: @@ -748,8 +748,14 @@ def _get_meta_data(collection, name, i): if evals_result: self._evals_result = evals_result + else: # reset after previous call to fit() + self._evals_result = None + + if self._Booster.best_iteration != 0: + self._best_iteration = self._Booster.best_iteration + else: # reset after previous call to fit() + self._best_iteration = None - self._best_iteration = self._Booster.best_iteration self._best_score = self._Booster.best_score self.fitted_ = True @@ -810,14 +816,14 @@ def n_features_in_(self): @property def best_score_(self): - """:obj:`dict`: The best score of fitted model.""" + "":obj:`dict`: The best score of fitted model.""" if self._n_features is None: raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.') return self._best_score @property def best_iteration_(self): - """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified.""" + """:obj:`int` or :obj:`None`: The best iteration of fitted model if ``early_stopping()`` callback has been specified.""" if self._n_features is None: raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.') return self._best_iteration diff --git a/tests/python_package_test/test_dask.py b/tests/python_package_test/test_dask.py index 11e512ffd996..2f84933cfd37 100644 --- a/tests/python_package_test/test_dask.py +++ b/tests/python_package_test/test_dask.py @@ -916,7 +916,7 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, # check that early stopping was not applied. assert dask_model.booster_.num_trees() == model_trees - assert dask_model.best_iteration_ == 0 + assert dask_model.best_iteration_ is None # checks that evals_result_ and best_score_ contain expected data and eval_set names. evals_result = dask_model.evals_result_