You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/callback.pyc in callback(env)
176 """internal function"""
177 if len(best_score) == 0:
--> 178 init(env)
179 for i in range(len(env.evaluation_result_list)):
180 score = env.evaluation_result_list[i][2] * factor_to_bigger_better[i]
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/callback.pyc in init(env)
158
159 if len(env.evaluation_result_list) == 0:
--> 160 raise ValueError('For early stopping you need at least one set in evals.')
161
162 if verbose:
ValueError: For early stopping you need at least one set in evals.
The text was updated successfully, but these errors were encountered:
This issue has been automatically locked since there has not been any recent activity since it was closed. To start a new related discussion, open a new issue at https://github.com/microsoft/LightGBM/issues including a reference to this.
It appears that, for early stopping, the current version doesn't specify a default metric; therefore, if we don't explicitly define a metric, it will fail:
import numpy as np
import lightgbm as lgb
from sklearn import datasets, metrics, model_selection
rng = np.random.RandomState(2016)
X, y = datasets.make_classification(n_samples=10000, n_features=100)
x_train, x_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.1, random_state=1)
lgb_model = lgb.LGBMClassifier(n_estimators=100).fit(x_train, y_train, [(x_test, y_test)]
#, eval_metric="auc" ## commented out the metric
,early_stopping_rounds=1
)
ValueError Traceback (most recent call last)
in ()
10 lgb_model = lgb.LGBMClassifier(n_estimators=100).fit(x_train, y_train, [(x_test, y_test)]
11 #, eval_metric="auc"
---> 12 ,early_stopping_rounds=1
13 )
14 # lgb_model.predict(x_test)
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/sklearn.pyc in fit(self, X, y, eval_set, eval_metric, early_stopping_rounds, verbose, train_fields, valid_fields, other_params)
333 eval_metric, early_stopping_rounds,
334 verbose, train_fields, valid_fields,
--> 335 other_params)
336 return self
337
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/sklearn.pyc in fit(self, X, y, eval_set, eval_metric, early_stopping_rounds, verbose, train_fields, valid_fields, other_params)
250 early_stopping_rounds=early_stopping_rounds,
251 evals_result=evals_result, fobj=self.fobj, feval=feval,
--> 252 verbose_eval=verbose, train_fields=train_fields, valid_fields=valid_fields)
253
254 if evals_result:
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/engine.pyc in train(params, train_data, num_boost_round, valid_datas, valid_names, fobj, feval, init_model, train_fields, valid_fields, early_stopping_rounds, evals_result, verbose_eval, learning_rates, callbacks)
206 begin_iteration=0,
207 end_iteration=num_boost_round,
--> 208 evaluation_result_list=evaluation_result_list))
209 except callback.EarlyStopException:
210 break
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/callback.pyc in callback(env)
176 """internal function"""
177 if len(best_score) == 0:
--> 178 init(env)
179 for i in range(len(env.evaluation_result_list)):
180 score = env.evaluation_result_list[i][2] * factor_to_bigger_better[i]
/Users/cchen1/anaconda/lib/python2.7/site-packages/lightgbm-0.1-py2.7.egg/lightgbm/callback.pyc in init(env)
158
159 if len(env.evaluation_result_list) == 0:
--> 160 raise ValueError('For early stopping you need at least one set in evals.')
161
162 if verbose:
ValueError: For early stopping you need at least one set in evals.
The text was updated successfully, but these errors were encountered: