Commit

Merge branch 'master' into deprecate_args

StrikerRUS authored Aug 30, 2021
2 parents bca79aa + 32445ab commit 59cb379
Showing 6 changed files with 44 additions and 34 deletions.
10 changes: 10 additions & 0 deletions R-package/R/lgb.Dataset.R
@@ -500,6 +500,16 @@ Dataset <- R6::R6Class(
    # Slice dataset
    slice = function(idxset, ...) {

+   additional_params <- list(...)
+   if (length(additional_params) > 0L) {
+     warning(paste0(
+       "Dataset$slice(): Found the following passed through '...': "
+       , paste(names(additional_params), collapse = ", ")
+       , ". These are ignored and should be removed. "
+       , "In future releases of lightgbm, this warning will become an error."
+     ))
+   }
+
    # Perform slicing
    return(
      Dataset$new(
10 changes: 5 additions & 5 deletions python-package/lightgbm/basic.py
@@ -1130,7 +1130,7 @@ def __init__(self, data, label=None, reference=None,
    Parameters
    ----------
- data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequences or list of numpy arrays
+ data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array
Data source of Dataset.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM) or a LightGBM Dataset binary file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
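
For context (not part of this diff): a minimal sketch of building a Dataset from one of the documented `data` types, with made-up arrays:

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(500, 10)           # the "numpy array" form of `data`
    y = np.random.rand(500)
    train_data = lgb.Dataset(X, label=y)
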
@@ -1260,9 +1260,9 @@ def _init_from_sample(
Parameters
----------
- sample_data : list of numpy arrays
+ sample_data : list of numpy array
Sample data for each column.
- sample_indices : list of numpy arrays
+ sample_indices : list of numpy array
Sample data row index for each column.
sample_cnt : int
Number of samples.
@@ -1774,7 +1774,7 @@ def create_valid(self, data, label=None, weight=None, group=None,
Parameters
----------
- data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequences or list of numpy arrays
+ data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array
Data source of Dataset.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM) or a LightGBM Dataset binary file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
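
A sketch of create_valid, reusing the made-up arrays from above (not part of this diff):

    X_valid = np.random.rand(100, 10)
    y_valid = np.random.rand(100)
    # a validation Dataset is created from, and aligned with, the training one
    valid_data = train_data.create_valid(X_valid, label=y_valid)
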
@@ -2241,7 +2241,7 @@ def get_data(self):
Returns
-------
- data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequences or list of numpy arrays or None
+ data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
14 changes: 7 additions & 7 deletions python-package/lightgbm/callback.py
@@ -69,7 +69,7 @@ def print_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
    Returns
    -------
- callback : function
+ callback : callable
The callback that logs the evaluation results every ``period`` boosting iteration(s).
"""
def _callback(env: CallbackEnv) -> None:
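
A sketch (not part of this commit) of wiring this callback into training; `params`, `train_data`, and `valid_data` are the assumed objects from the earlier snippets:

    params = {"objective": "regression", "metric": "l2", "verbose": -1}
    booster = lgb.train(
        params,
        train_data,
        valid_sets=[valid_data],
        callbacks=[lgb.print_evaluation(period=10)],  # log metrics every 10 rounds
    )
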
@@ -110,7 +110,7 @@ def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> Callable:
Returns
-------
- callback : function
+ callback : callable
The callback that records the evaluation history into the passed dictionary.
"""
if not isinstance(eval_result, dict):
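
Sketch of capturing the evaluation history with this callback (the resulting values shown are made up):

    eval_hist = {}
    booster = lgb.train(
        params,
        train_data,
        valid_sets=[valid_data],
        valid_names=["valid"],
        callbacks=[lgb.record_evaluation(eval_hist)],
    )
    # afterwards eval_hist looks like {"valid": {"l2": [0.25, 0.23, ...]}}
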
@@ -140,16 +140,16 @@ def reset_parameter(**kwargs: Union[list, Callable]) -> Callable:
Parameters
----------
- **kwargs : value should be list or function
+ **kwargs : value should be list or callable
List of parameters for each boosting round
- or a customized function that calculates the parameter in terms of
+ or a callable that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
- If function func, parameter = func(current_round).
+ If callable func, parameter = func(current_round).
Returns
-------
- callback : function
+ callback : callable
The callback that resets the parameter after the first iteration.
"""
def _callback(env: CallbackEnv) -> None:
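
Both accepted forms, as a sketch assuming 100 boosting rounds:

    # list form: parameter = lst[current_round]
    cb = lgb.reset_parameter(learning_rate=[0.1 * (0.99 ** i) for i in range(100)])
    # callable form: parameter = func(current_round)
    cb = lgb.reset_parameter(learning_rate=lambda round_num: 0.1 * (0.99 ** round_num))
    booster = lgb.train(params, train_data, num_boost_round=100, callbacks=[cb])
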
@@ -196,7 +196,7 @@ def early_stopping(stopping_rounds: int, first_metric_only: bool = False, verbos
Returns
-------
- callback : function
+ callback : callable
The callback that activates early stopping.
"""
best_score = []
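
Typical usage, sketched with the assumed datasets from above:

    booster = lgb.train(
        params,
        train_data,
        num_boost_round=1000,
        valid_sets=[valid_data],
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
    )
    # training stops once no validation metric improves for 5 consecutive rounds
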
24 changes: 12 additions & 12 deletions python-package/lightgbm/dask.py
@@ -435,13 +435,13 @@ def _train(
of evals_result_ and best_score_ will be 'not_evaluated'.
eval_names : list of str, or None, optional (default=None)
Names of eval_set.
- eval_sample_weight : list of Dask Arrays or Dask Series, or None, optional (default=None)
+ eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
Weights for each validation set in eval_set.
eval_class_weight : list of dict or str, or None, optional (default=None)
Class weights, one dict or str for each validation set in eval_set.
- eval_init_score : list of Dask Arrays or Dask Series, or None, optional (default=None)
+ eval_init_score : list of Dask Array or Dask Series, or None, optional (default=None)
Initial model score for each validation set in eval_set.
- eval_group : list of Dask Arrays or Dask Series, or None, optional (default=None)
+ eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
Group/query for each validation set in eval_set.
eval_metric : str, callable, list or None, optional (default=None)
If str, it should be a built-in evaluation metric to use.
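
An illustrative sketch only (not part of this diff); it assumes a running Dask client and uses made-up arrays:

    import dask.array as da
    from lightgbm import DaskLGBMRegressor

    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random((1000,), chunks=(100,))
    w = da.ones((1000,), chunks=(100,))

    model = DaskLGBMRegressor(n_estimators=50)
    # eval_sample_weight takes one Dask Array per validation set in eval_set
    model.fit(X, y, eval_set=[(X, y)], eval_sample_weight=[w])
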
@@ -1194,9 +1194,9 @@ def fit(
sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
group_shape="Dask Array or Dask Series or None, optional (default=None)",
eval_sample_weight_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
eval_init_score_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
eval_group_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)"
eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
)

# DaskLGBMClassifier does not support group, eval_group, early_stopping_rounds.
@@ -1371,9 +1371,9 @@ def fit(
sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
group_shape="Dask Array or Dask Series or None, optional (default=None)",
- eval_sample_weight_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
- eval_init_score_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
- eval_group_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)"
+ eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
+ eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
+ eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
)

# DaskLGBMRegressor does not support group, eval_class_weight, eval_group, early_stopping_rounds.
@@ -1538,9 +1538,9 @@ def fit(
sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
group_shape="Dask Array or Dask Series or None, optional (default=None)",
- eval_sample_weight_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
- eval_init_score_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)",
- eval_group_shape="list of Dask Arrays or Dask Series, or None, optional (default=None)"
+ eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
+ eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
+ eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
)

# DaskLGBMRanker does not support eval_class_weight or early stopping
12 changes: 6 additions & 6 deletions python-package/lightgbm/engine.py
@@ -50,7 +50,7 @@ def train(
Data to be trained on.
num_boost_round : int, optional (default=100)
Number of boosting iterations.
- valid_sets : list of Datasets, or None, optional (default=None)
+ valid_sets : list of Dataset, or None, optional (default=None)
List of data to be evaluated on during training.
valid_names : list of str, or None, optional (default=None)
Names of ``valid_sets``.
@@ -76,7 +76,7 @@ def train(
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
- feval : callable, list of callable functions, or None, optional (default=None)
+ feval : callable, list of callable, or None, optional (default=None)
Customized evaluation function.
Each evaluation function should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
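
A sketch of a custom feval matching the documented signature (metric name made up):

    def mean_abs_err(preds, train_data):
        # (preds, train_data) -> (eval_name, eval_result, is_higher_better)
        y_true = train_data.get_label()
        return "mae", float(np.mean(np.abs(y_true - preds))), False

    booster = lgb.train(params, train_data, valid_sets=[valid_data], feval=mean_abs_err)
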
@@ -147,7 +147,7 @@ def train(
learning_rates : list, callable or None, optional (default=None)
List of learning rates for each boosting round
- or a customized function that calculates ``learning_rate``
+ or a callable that calculates ``learning_rate``
in terms of current number of round (e.g. yields learning rate decay).
keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
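
Both documented forms of ``learning_rates``, sketched with the assumed objects from above:

    # list form: one learning rate per boosting round
    booster = lgb.train(params, train_data, num_boost_round=100,
                        learning_rates=[0.1] * 50 + [0.05] * 50)
    # callable form: current round number -> learning rate
    booster = lgb.train(params, train_data, num_boost_round=100,
                        learning_rates=lambda i: 0.1 * (0.99 ** i))
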
@@ -156,7 +156,7 @@ def train(
When your model is very large and cause the memory error,
you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``.
You can still use _InnerPredictor as ``init_model`` for future continue training.
- callbacks : list of callables, or None, optional (default=None)
+ callbacks : list of callable, or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
@@ -487,7 +487,7 @@ def cv(params, train_set, num_boost_round=100,
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
- feval : callable, list of callable functions, or None, optional (default=None)
+ feval : callable, list of callable, or None, optional (default=None)
Customized evaluation function.
Each evaluation function should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
@@ -542,7 +542,7 @@ def cv(params, train_set, num_boost_round=100,
Results are not affected by this parameter, and always contain std.
seed : int, optional (default=0)
Seed used to generate the folds (passed to numpy.random.seed).
- callbacks : list of callables, or None, optional (default=None)
+ callbacks : list of callable, or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
eval_train_metric : bool, optional (default=False)
8 changes: 4 additions & 4 deletions python-package/lightgbm/sklearn.py
@@ -254,7 +254,7 @@ def __call__(self, preds, dataset):
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
- callbacks : list of callback functions, or None, optional (default=None)
+ callbacks : list of callable, or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
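
A sketch of passing callbacks through the scikit-learn interface; X, y, X_valid, and y_valid are the made-up numpy arrays from the earlier snippets:

    from lightgbm import LGBMRegressor, early_stopping

    reg = LGBMRegressor(n_estimators=1000)
    reg.fit(
        X, y,
        eval_set=[(X_valid, y_valid)],
        callbacks=[early_stopping(stopping_rounds=5)],
    )
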
@@ -765,9 +765,9 @@ def _get_meta_data(collection, name, i):
sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
init_score_shape="array-like of shape = [n_samples] or None, optional (default=None)",
group_shape="array-like or None, optional (default=None)",
eval_sample_weight_shape="list of arrays, or None, optional (default=None)",
eval_init_score_shape="list of arrays, or None, optional (default=None)",
eval_group_shape="list of arrays, or None, optional (default=None)"
eval_sample_weight_shape="list of array, or None, optional (default=None)",
eval_init_score_shape="list of array, or None, optional (default=None)",
eval_group_shape="list of array, or None, optional (default=None)"
) + "\n\n" + _lgbmmodel_doc_custom_eval_note

def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
