/**
* Copyright 2014-2024, XGBoost Contributors
* \file learner.cc
* \brief Implementation of learning algorithm.
* \author Tianqi Chen
*/
#include "xgboost/learner.h"
#include <dmlc/io.h> // for Stream
#include <dmlc/parameter.h> // for FieldEntry, DMLC_DECLARE_FIELD, Parameter, DMLC...
#include <dmlc/thread_local.h> // for ThreadLocalStore
#include <algorithm> // for equal, max, transform, sort, find_if, all_of
#include <array> // for array
#include <atomic> // for atomic
#include <cctype> // for isalpha, isspace
#include <cmath> // for isnan, isinf
#include <cstdint> // for int32_t, uint32_t, int64_t, uint64_t
#include <cstdlib> // for atoi
#include <cstring> // for memcpy, size_t, memset
#include <iomanip> // for operator<<, setiosflags
#include <iterator> // for back_insert_iterator, distance, back_inserter
#include <limits> // for numeric_limits
#include <memory> // for allocator, unique_ptr, shared_ptr, operator==
#include <mutex> // for mutex, lock_guard
#include <set> // for set
#include <sstream> // for operator<<, basic_ostream, basic_ostream::opera...
#include <stack> // for stack
#include <string> // for basic_string, char_traits, operator<, string
#include <system_error> // for errc
#include <tuple> // for get
#include <unordered_map> // for operator!=, unordered_map
#include <utility> // for pair, as_const, move, swap
#include <vector> // for vector
#include "collective/aggregator.h" // for ApplyWithLabels
#include "collective/communicator-inl.h" // for Allreduce, Broadcast, GetRank, IsDistributed
#include "common/api_entry.h" // for XGBAPIThreadLocalEntry
#include "common/charconv.h" // for to_chars, to_chars_result, NumericLimits, from_...
#include "common/common.h" // for ToString, Split
#include "common/error_msg.h" // for MaxFeatureSize, WarnOldSerialization, ...
#include "common/io.h" // for PeekableInStream, ReadAll, FixedSizeStream, Mem...
#include "common/observer.h" // for TrainingObserver
#include "common/random.h" // for GlobalRandom
#include "common/timer.h" // for Monitor
#include "common/version.h" // for Version
#include "dmlc/endian.h" // for ByteSwap, DMLC_IO_NO_ENDIAN_SWAP
#include "xgboost/base.h" // for Args, bst_float, GradientPair, bst_feature_t, ...
#include "xgboost/context.h" // for Context
#include "xgboost/data.h" // for DMatrix, MetaInfo
#include "xgboost/gbm.h" // for GradientBooster
#include "xgboost/global_config.h" // for GlobalConfiguration, GlobalConfigThreadLocalStore
#include "xgboost/host_device_vector.h" // for HostDeviceVector
#include "xgboost/json.h" // for Json, get, Object, String, IsA, Array, ToJson
#include "xgboost/linalg.h" // for Tensor, TensorView
#include "xgboost/logging.h" // for CHECK, LOG, CHECK_EQ
#include "xgboost/metric.h" // for Metric
#include "xgboost/objective.h" // for ObjFunction
#include "xgboost/parameter.h" // for DECLARE_FIELD_ENUM_CLASS, XGBoostParameter
#include "xgboost/predictor.h" // for PredictionContainer, PredictionCacheEntry
#include "xgboost/string_view.h" // for operator<<, StringView
#include "xgboost/task.h" // for ObjInfo
namespace {
const char* kMaxDeltaStepDefaultValue = "0.7";
} // anonymous namespace
DECLARE_FIELD_ENUM_CLASS(xgboost::MultiStrategy);
namespace xgboost {
Learner::~Learner() = default;
namespace {
StringView ModelNotFitted() { return "Model is not yet initialized (not fitted)."; }
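// Dereference guard used throughout this file: accessing a component that has not
// been created yet (objective, booster) fails with a clear CHECK error instead of
// a null-pointer crash.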
template <typename T>
T& UsePtr(T& ptr) { // NOLINT
CHECK(ptr);
return ptr;
}
} // anonymous namespace
/*! \brief training parameter for regression
*
* Should be deprecated, but is still used for compatibility with binary IO.
* Once it's gone, `LearnerModelParam` should handle transforming `base_margin`
* with objective by itself.
*/
struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy> {
/* \brief global bias */
bst_float base_score;
/* \brief number of features */
bst_feature_t num_feature;
/* \brief number of classes, if it is multi-class classification */
std::int32_t num_class;
/*! \brief Whether the model contains additional properties. */
int32_t contain_extra_attrs;
/*! \brief Whether the model contains evaluation metrics. */
int32_t contain_eval_metrics;
/*! \brief the version of XGBoost. */
std::uint32_t major_version;
std::uint32_t minor_version;
/**
* \brief Number of target variables.
*/
bst_target_t num_target;
/**
* \brief Whether we should calculate the base score from training data.
*
* This is a private parameter as we can't expose it as boolean due to binary model
* format. Exposing it as integer creates inconsistency with other parameters.
*
* Automatically disabled when base_score is specified by the user. int32 is used instead
* of bool for the ease of serialization.
*/
std::int32_t boost_from_average{true};
/*! \brief reserved field */
int reserved[25];
/*! \brief constructor */
LearnerModelParamLegacy() {
std::memset(this, 0, sizeof(LearnerModelParamLegacy));
base_score = ObjFunction::DefaultBaseScore();
num_target = 1;
major_version = std::get<0>(Version::Self());
minor_version = std::get<1>(Version::Self());
boost_from_average = true;
static_assert(sizeof(LearnerModelParamLegacy) == 136,
"Do not change the size of this struct, as it will break binary IO.");
}
// Skip other legacy fields.
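// Numeric fields are serialised as strings via to_chars so that the JSON round-trip
// is lossless and locale-independent; FromJson parses them back symmetrically.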
[[nodiscard]] Json ToJson() const {
Json obj{Object{}};
char floats[NumericLimits<float>::kToCharsSize];
auto ret = to_chars(floats, floats + NumericLimits<float>::kToCharsSize, base_score);
CHECK(ret.ec == std::errc{});
obj["base_score"] = std::string{floats, static_cast<size_t>(std::distance(floats, ret.ptr))};
char integers[NumericLimits<int64_t>::kToCharsSize];
ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
static_cast<int64_t>(num_feature));
CHECK(ret.ec == std::errc());
obj["num_feature"] =
std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
static_cast<int64_t>(num_class));
CHECK(ret.ec == std::errc());
obj["num_class"] = std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
static_cast<int64_t>(num_target));
obj["num_target"] =
std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
ret = to_chars(integers, integers + NumericLimits<std::int64_t>::kToCharsSize,
static_cast<std::int64_t>(boost_from_average));
obj["boost_from_average"] =
std::string{integers, static_cast<std::size_t>(std::distance(integers, ret.ptr))};
return obj;
}
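// Illustrative (not verbatim) ToJson output for a freshly constructed object:
//
//   {"base_score": "5E-1", "num_feature": "0", "num_class": "0",
//    "num_target": "1", "boost_from_average": "1"}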
void FromJson(Json const& obj) {
auto const& j_param = get<Object const>(obj);
std::map<std::string, std::string> m;
m["num_feature"] = get<String const>(j_param.at("num_feature"));
m["num_class"] = get<String const>(j_param.at("num_class"));
auto n_targets_it = j_param.find("num_target");
if (n_targets_it != j_param.cend()) {
m["num_target"] = get<String const>(n_targets_it->second);
}
auto bse_it = j_param.find("boost_from_average");
if (bse_it != j_param.cend()) {
m["boost_from_average"] = get<String const>(bse_it->second);
}
this->Init(m);
std::string str = get<String const>(j_param.at("base_score"));
from_chars(str.c_str(), str.c_str() + str.size(), base_score);
}
[[nodiscard]] LearnerModelParamLegacy ByteSwap() const {
LearnerModelParamLegacy x = *this;
dmlc::ByteSwap(&x.base_score, sizeof(x.base_score), 1);
dmlc::ByteSwap(&x.num_feature, sizeof(x.num_feature), 1);
dmlc::ByteSwap(&x.num_class, sizeof(x.num_class), 1);
dmlc::ByteSwap(&x.contain_extra_attrs, sizeof(x.contain_extra_attrs), 1);
dmlc::ByteSwap(&x.contain_eval_metrics, sizeof(x.contain_eval_metrics), 1);
dmlc::ByteSwap(&x.major_version, sizeof(x.major_version), 1);
dmlc::ByteSwap(&x.minor_version, sizeof(x.minor_version), 1);
dmlc::ByteSwap(&x.num_target, sizeof(x.num_target), 1);
dmlc::ByteSwap(&x.boost_from_average, sizeof(x.boost_from_average), 1);
dmlc::ByteSwap(x.reserved, sizeof(x.reserved[0]), sizeof(x.reserved) / sizeof(x.reserved[0]));
return x;
}
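// Wrapper over the dmlc parameter update. Illustratively, passing
// Args{{"base_score", "0.2"}} records the user-provided value and disables
// boost_from_average for this model.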
template <typename Container>
Args UpdateAllowUnknown(Container const& kwargs) {
// Detect whether the user has supplied their own base score.
auto find_key = [&kwargs](char const* key) {
return std::find_if(kwargs.cbegin(), kwargs.cend(),
[key](auto const& kv) { return kv.first == key; });
};
auto it = find_key("base_score");
if (it != kwargs.cend()) {
boost_from_average = false;
}
return dmlc::Parameter<LearnerModelParamLegacy>::UpdateAllowUnknown(kwargs);
}
// Sanity check: in distributed training, all workers must hold identical model parameters.
void Validate(Context const* ctx) {
if (!collective::IsDistributed()) {
return;
}
std::array<std::int32_t, 6> data;
std::size_t pos{0};
std::memcpy(data.data() + pos, &base_score, sizeof(base_score));
pos += 1;
std::memcpy(data.data() + pos, &num_feature, sizeof(num_feature));
pos += 1;
std::memcpy(data.data() + pos, &num_class, sizeof(num_class));
pos += 1;
std::memcpy(data.data() + pos, &num_target, sizeof(num_target));
pos += 1;
std::memcpy(data.data() + pos, &major_version, sizeof(major_version));
pos += 1;
std::memcpy(data.data() + pos, &minor_version, sizeof(minor_version));
std::array<std::int32_t, 6> sync;
std::copy(data.cbegin(), data.cend(), sync.begin());
auto rc = collective::Broadcast(ctx, linalg::MakeVec(sync.data(), sync.size()), 0);
collective::SafeColl(rc);
CHECK(std::equal(data.cbegin(), data.cend(), sync.cbegin()))
<< "Different model parameter across workers.";
}
// declare parameters
DMLC_DECLARE_PARAMETER(LearnerModelParamLegacy) {
DMLC_DECLARE_FIELD(base_score)
.set_default(ObjFunction::DefaultBaseScore())
.describe("Global bias of the model.");
DMLC_DECLARE_FIELD(num_feature)
.set_default(0)
.describe(
"Number of features in training data; this parameter will be automatically detected by "
"the learner.");
DMLC_DECLARE_FIELD(num_class).set_default(0).set_lower_bound(0).describe(
"Number of classes for multi-class classification. "
"The default of 0 corresponds to a binary classifier.");
DMLC_DECLARE_FIELD(num_target)
.set_default(1)
.set_lower_bound(1)
.describe("Number of output targets. Can be set automatically if not specified.");
DMLC_DECLARE_FIELD(boost_from_average)
.set_default(true)
.describe("Whether we should calculate the base score from training data.");
}
};
LearnerModelParam::LearnerModelParam(LearnerModelParamLegacy const& user_param, ObjInfo t,
MultiStrategy multi_strategy)
: num_feature{user_param.num_feature},
num_output_group{
std::max(static_cast<std::uint32_t>(user_param.num_class), user_param.num_target)},
task{t},
multi_strategy{multi_strategy} {
if (user_param.num_class > 1 && user_param.num_target > 1) {
LOG(FATAL) << "multi-target-multi-class is not yet supported. Output classes:"
<< user_param.num_class << ", output targets:" << user_param.num_target;
}
}
LearnerModelParam::LearnerModelParam(Context const* ctx, LearnerModelParamLegacy const& user_param,
linalg::Tensor<float, 1> base_margin, ObjInfo t,
MultiStrategy multi_strategy)
: LearnerModelParam{user_param, t, multi_strategy} {
std::swap(base_score_, base_margin);
// Make sure read access is available everywhere for thread-safe prediction.
std::as_const(base_score_).HostView();
if (ctx->IsCUDA()) {
std::as_const(base_score_).View(ctx->Device());
}
CHECK(std::as_const(base_score_).Data()->HostCanRead());
}
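// Note on the checks below: prediction may run concurrently from multiple threads,
// so BaseScore() only takes views after asserting that the required read access
// already exists; taking the view must never mutate access state mid-prediction.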
linalg::TensorView<float const, 1> LearnerModelParam::BaseScore(DeviceOrd device) const {
// multi-class is not yet supported.
CHECK_EQ(base_score_.Size(), 1) << ModelNotFitted();
if (!device.IsCUDA()) {
// Make sure that we won't run into a race condition.
CHECK(base_score_.Data()->HostCanRead());
return base_score_.HostView();
}
// Make sure that we won't run into a race condition.
CHECK(base_score_.Data()->DeviceCanRead());
auto v = base_score_.View(device);
CHECK(base_score_.Data()->HostCanRead()); // make sure read access is not removed.
return v;
}
linalg::TensorView<float const, 1> LearnerModelParam::BaseScore(Context const* ctx) const {
return this->BaseScore(ctx->Device());
}
void LearnerModelParam::Copy(LearnerModelParam const& that) {
base_score_.Reshape(that.base_score_.Shape());
base_score_.Data()->SetDevice(that.base_score_.Device());
base_score_.Data()->Copy(*that.base_score_.Data());
std::as_const(base_score_).HostView();
if (!that.base_score_.Device().IsCPU()) {
std::as_const(base_score_).View(that.base_score_.Device());
}
CHECK_EQ(base_score_.Data()->DeviceCanRead(), that.base_score_.Data()->DeviceCanRead());
CHECK(base_score_.Data()->HostCanRead());
num_feature = that.num_feature;
num_output_group = that.num_output_group;
task = that.task;
multi_strategy = that.multi_strategy;
}
struct LearnerTrainParam : public XGBoostParameter<LearnerTrainParam> {
// flag to disable default metric
bool disable_default_eval_metric {false};
// FIXME(trivialfis): The following parameters belong to model itself, but can be
// specified by users. Move them to model parameter once we can get rid of binary IO.
std::string booster;
std::string objective;
// This is a training parameter and is not saved (nor loaded) in the model.
MultiStrategy multi_strategy{MultiStrategy::kOneOutputPerTree};
// declare parameters
DMLC_DECLARE_PARAMETER(LearnerTrainParam) {
DMLC_DECLARE_FIELD(disable_default_eval_metric)
.set_default(false)
.describe("Flag to disable the default metric. Set to >0 to disable.");
DMLC_DECLARE_FIELD(booster).set_default("gbtree").describe(
"Gradient booster used for training.");
DMLC_DECLARE_FIELD(objective)
.set_default("reg:squarederror")
.describe("Objective function used for obtaining gradient.");
DMLC_DECLARE_FIELD(multi_strategy)
.add_enum("one_output_per_tree", MultiStrategy::kOneOutputPerTree)
.add_enum("multi_output_tree", MultiStrategy::kMultiOutputTree)
.set_default(MultiStrategy::kOneOutputPerTree)
.describe(
"Strategy used for training multi-target models. `multi_output_tree` means building "
"a single tree for all targets.");
}
};
DMLC_REGISTER_PARAMETER(LearnerModelParamLegacy);
DMLC_REGISTER_PARAMETER(LearnerTrainParam);
using LearnerAPIThreadLocalStore =
dmlc::ThreadLocalStore<std::map<Learner const *, XGBAPIThreadLocalEntry>>;
class LearnerConfiguration : public Learner {
private:
std::mutex config_lock_;
protected:
static std::string const kEvalMetric; // NOLINT
protected:
std::atomic<bool> need_configuration_;
std::map<std::string, std::string> cfg_;
// Stores information like best-iteration for early stopping.
std::map<std::string, std::string> attributes_;
// Name of each feature, usually set from DMatrix.
std::vector<std::string> feature_names_;
// Type of each feature, usually set from DMatrix.
std::vector<std::string> feature_types_;
common::Monitor monitor_;
LearnerModelParamLegacy mparam_;
LearnerModelParam learner_model_param_;
LearnerTrainParam tparam_;
// Cache of prediction results for the DMatrix objects used with this learner.
PredictionContainer prediction_container_;
std::vector<std::string> metric_names_;
void ConfigureModelParamWithoutBaseScore() {
// Convert mparam to learner_model_param
this->ConfigureTargets();
auto task = UsePtr(obj_)->Task();
linalg::Tensor<float, 1> base_score({1}, Ctx()->Device());
auto h_base_score = base_score.HostView();
// transform to margin
h_base_score(0) = obj_->ProbToMargin(mparam_.base_score);
CHECK(tparam_.GetInitialised());
// move it to model param, which is shared with all other components.
learner_model_param_ =
LearnerModelParam(Ctx(), mparam_, std::move(base_score), task, tparam_.multi_strategy);
CHECK(learner_model_param_.Initialized());
CHECK_NE(learner_model_param_.BaseScore(Ctx()).Size(), 0);
}
/**
* \brief Calculate the `base_score` based on input data.
*
* \param p_fmat The training DMatrix used to estimate the base score.
*/
void InitBaseScore(DMatrix const* p_fmat) {
// Before 1.0.0, we saved `base_score` into the binary format as a value already
// transformed by the objective. After 1.0.0 we save the value provided by the user
// and keep it immutable instead. To preserve stability, we initialise it in the
// binary LoadModel instead of during configuration.
//
// The transformation should be omitted when:
//
// - base_score is loaded from an old binary model.
//
// The other possible conditions are:
//
// - the model is loaded from a new binary or JSON model.
// - the model is created from scratch.
// - the model is configured a second time due to a parameter change.
if (!learner_model_param_.Initialized()) {
this->ConfigureModelParamWithoutBaseScore();
}
if (mparam_.boost_from_average && !UsePtr(gbm_)->ModelFitted()) {
if (p_fmat) {
auto const& info = p_fmat->Info();
info.Validate(Ctx()->Device());
// We estimate it from input data.
linalg::Tensor<float, 1> base_score;
InitEstimation(info, &base_score);
CHECK_EQ(base_score.Size(), 1);
mparam_.base_score = base_score(0);
CHECK(!std::isnan(mparam_.base_score));
}
// Update the shared model parameter
this->ConfigureModelParamWithoutBaseScore();
mparam_.Validate(&ctx_);
}
CHECK(!std::isnan(mparam_.base_score));
CHECK(!std::isinf(mparam_.base_score));
}
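// Roughly, the estimation path above (illustrative summary, not literal code):
//
//   if (mparam_.boost_from_average && !gbm_->ModelFitted() && p_fmat != nullptr)
//     mparam_.base_score = estimate from labels via obj_->InitEstimation(...);
//   shared base score = obj_->ProbToMargin(mparam_.base_score);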
public:
explicit LearnerConfiguration(std::vector<std::shared_ptr<DMatrix>> cache)
: need_configuration_{true} {
monitor_.Init("Learner");
for (std::shared_ptr<DMatrix> const& d : cache) {
if (d) {
prediction_container_.Cache(d, DeviceOrd::CPU());
}
}
}
// Configuration before data is known.
void Configure() override {
// Variant of double-checked locking: re-check the flag after acquiring the mutex.
if (!this->need_configuration_) {
return;
}
std::lock_guard<std::mutex> guard(config_lock_);
if (!this->need_configuration_) {
return;
}
monitor_.Start("Configure");
auto old_tparam = tparam_;
Args args = {cfg_.cbegin(), cfg_.cend()};
tparam_.UpdateAllowUnknown(args);
mparam_.UpdateAllowUnknown(args);
auto initialized = ctx_.GetInitialised();
auto old_seed = ctx_.seed;
ctx_.UpdateAllowUnknown(args);
ConsoleLogger::Configure(args);
// set seed only before the model is initialized
if (!initialized || ctx_.seed != old_seed) {
common::GlobalRandom().seed(ctx_.seed);
}
// Must precede GBM configuration since num_feature is required by the GBM.
this->ConfigureNumFeatures();
args = {cfg_.cbegin(), cfg_.cend()}; // renew
this->ConfigureObjective(old_tparam, &args);
learner_model_param_.task = obj_->Task(); // required by gbm configuration.
this->ConfigureGBM(old_tparam, args);
ctx_.ConfigureGpuId(this->gbm_->UseGPU());
this->ConfigureModelParamWithoutBaseScore();
this->ConfigureMetrics(args);
this->need_configuration_ = false;
if (ctx_.validate_parameters) {
this->ValidateParameters();
}
cfg_.clear();
monitor_.Stop("Configure");
}
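// Configuration is lazy: SetParam() merely flips need_configuration_, and the next
// Configure() call rebuilds the affected components. An illustrative driver (not
// part of this file), where p_train is a std::shared_ptr<DMatrix>:
//
//   std::unique_ptr<Learner> learner{Learner::Create({p_train})};
//   learner->SetParam("objective", "reg:squarederror");
//   learner->Configure();  // applies pending parameters; later calls are no-ops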
void CheckModelInitialized() const {
CHECK(learner_model_param_.Initialized()) << ModelNotFitted();
CHECK_NE(learner_model_param_.BaseScore(this->Ctx()).Size(), 0) << ModelNotFitted();
}
void LoadConfig(Json const& in) override {
// If configuration is loaded, ensure that the model came from the same version
CHECK(IsA<Object>(in));
auto origin_version = Version::Load(in);
if (std::get<0>(Version::kInvalid) == std::get<0>(origin_version)) {
LOG(WARNING) << "Invalid version string in config";
}
if (!Version::Same(origin_version)) {
error::WarnOldSerialization();
return; // skip configuration if version is not matched
}
auto const& learner_parameters = get<Object>(in["learner"]);
FromJson(learner_parameters.at("learner_train_param"), &tparam_);
auto const& gradient_booster = learner_parameters.at("gradient_booster");
auto const& objective_fn = learner_parameters.at("objective");
if (!obj_) {
CHECK_EQ(get<String const>(objective_fn["name"]), tparam_.objective);
obj_.reset(ObjFunction::Create(tparam_.objective, &ctx_));
}
obj_->LoadConfig(objective_fn);
learner_model_param_.task = obj_->Task();
tparam_.booster = get<String>(gradient_booster["name"]);
if (!gbm_) {
gbm_.reset(GradientBooster::Create(tparam_.booster, &ctx_, &learner_model_param_));
}
gbm_->LoadConfig(gradient_booster);
auto const& j_metrics = learner_parameters.at("metrics");
auto n_metrics = get<Array const>(j_metrics).size();
metric_names_.resize(n_metrics);
metrics_.resize(n_metrics);
for (size_t i = 0; i < n_metrics; ++i) {
auto old_serialization = IsA<String>(j_metrics[i]);
if (old_serialization) {
error::WarnOldSerialization();
metric_names_[i] = get<String>(j_metrics[i]);
} else {
metric_names_[i] = get<String>(j_metrics[i]["name"]);
}
metrics_[i] = std::unique_ptr<Metric>(Metric::Create(metric_names_[i], &ctx_));
if (!old_serialization) {
metrics_[i]->LoadConfig(j_metrics[i]);
}
}
FromJson(learner_parameters.at("generic_param"), &ctx_);
// Make sure the GPU ID is valid in the new environment before running configuration.
ctx_.ConfigureGpuId(false);
this->need_configuration_ = true;
}
void SaveConfig(Json* p_out) const override {
CHECK(!this->need_configuration_) << "Call Configure before saving model.";
Version::Save(p_out);
Json& out { *p_out };
// parameters
out["learner"] = Object();
auto& learner_parameters = out["learner"];
learner_parameters["learner_train_param"] = ToJson(tparam_);
learner_parameters["learner_model_param"] = mparam_.ToJson();
learner_parameters["gradient_booster"] = Object();
auto& gradient_booster = learner_parameters["gradient_booster"];
gbm_->SaveConfig(&gradient_booster);
learner_parameters["objective"] = Object();
auto& objective_fn = learner_parameters["objective"];
obj_->SaveConfig(&objective_fn);
std::vector<Json> metrics(metrics_.size());
for (size_t i = 0; i < metrics_.size(); ++i) {
metrics[i] = Object{};
metrics_[i]->SaveConfig(&metrics[i]);
}
learner_parameters["metrics"] = Array(std::move(metrics));
learner_parameters["generic_param"] = ToJson(ctx_);
}
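// The configuration saved above has roughly this layout (illustrative, abbreviated):
//
//   {"version": [major, minor, patch],
//    "learner": {"learner_train_param": {...}, "learner_model_param": {...},
//                "gradient_booster": {...}, "objective": {...},
//                "metrics": [...], "generic_param": {...}}}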
void SetParam(const std::string& key, const std::string& value) override {
this->need_configuration_ = true;
if (key == kEvalMetric) {
if (std::find(metric_names_.cbegin(), metric_names_.cend(),
value) == metric_names_.cend()) {
metric_names_.emplace_back(value);
}
} else {
cfg_[key] = value;
}
}
// Shorthand for setting multiple parameters.
void SetParams(std::vector<std::pair<std::string, std::string>> const& args) override {
for (auto const& kv : args) {
this->SetParam(kv.first, kv.second);
}
}
uint32_t GetNumFeature() const override {
return learner_model_param_.num_feature;
}
void SetAttr(const std::string& key, const std::string& value) override {
attributes_[key] = value;
mparam_.contain_extra_attrs = 1;
}
bool GetAttr(const std::string& key, std::string* out) const override {
auto it = attributes_.find(key);
if (it == attributes_.end()) return false;
*out = it->second;
return true;
}
bool DelAttr(const std::string& key) override {
auto it = attributes_.find(key);
if (it == attributes_.end()) { return false; }
attributes_.erase(it);
return true;
}
void SetFeatureNames(std::vector<std::string> const& fn) override {
feature_names_ = fn;
}
void GetFeatureNames(std::vector<std::string>* fn) const override {
*fn = feature_names_;
}
void SetFeatureTypes(std::vector<std::string> const& ft) override {
this->feature_types_ = ft;
}
void GetFeatureTypes(std::vector<std::string>* p_ft) const override {
auto& ft = *p_ft;
ft = this->feature_types_;
}
std::vector<std::string> GetAttrNames() const override {
std::vector<std::string> out;
for (auto const& kv : attributes_) {
out.emplace_back(kv.first);
}
return out;
}
const std::map<std::string, std::string>& GetConfigurationArguments() const override {
return cfg_;
}
Context const* Ctx() const override { return &ctx_; }
private:
void ValidateParameters() {
Json config { Object() };
this->SaveConfig(&config);
std::stack<Json> stack;
stack.push(config);
std::string const postfix{"_param"};
auto is_parameter = [&postfix](std::string const &key) {
return key.size() > postfix.size() &&
std::equal(postfix.rbegin(), postfix.rend(), key.rbegin());
};
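// e.g. "learner_train_param" and "generic_param" qualify; "learner" does not.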
// Extract all parameters
std::vector<std::string> keys;
// First global parameters
Json const global_config{ToJson(*GlobalConfigThreadLocalStore::Get())};
for (auto const& items : get<Object const>(global_config)) {
keys.emplace_back(items.first);
}
// Parameters in various xgboost components.
while (!stack.empty()) {
auto j_obj = stack.top();
stack.pop();
auto const &obj = get<Object const>(j_obj);
for (auto const& kv : obj) {
if (is_parameter(kv.first)) {
auto parameter = get<Object const>(kv.second);
std::transform(
parameter.begin(), parameter.end(), std::back_inserter(keys),
[](std::pair<std::string const&, Json const&> const& kv) { return kv.first; });
} else if (IsA<Object>(kv.second)) {
stack.push(kv.second);
} else if (IsA<Array>(kv.second)) {
auto const& array = get<Array const>(kv.second);
for (auto const& v : array) {
if (IsA<Object>(v) || IsA<Array>(v)) {
stack.push(v);
}
}
}
}
}
// FIXME(trivialfis): Make eval_metric a training parameter.
keys.emplace_back(kEvalMetric);
keys.emplace_back("num_output_group");
keys.emplace_back("gpu_id"); // deprecated param.
std::sort(keys.begin(), keys.end());
std::vector<std::string> provided;
for (auto const &kv : cfg_) {
if (std::any_of(kv.first.cbegin(), kv.first.cend(),
[](char ch) { return std::isspace(ch); })) {
LOG(FATAL) << "Invalid parameter \"" << kv.first << "\" contains whitespace.";
}
provided.push_back(kv.first);
}
std::sort(provided.begin(), provided.end());
std::vector<std::string> diff;
std::set_difference(provided.begin(), provided.end(), keys.begin(),
keys.end(), std::back_inserter(diff));
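// Anything the user provided that no component recognises ends up in `diff`,
// producing a warning such as (illustrative): Parameters: { "typo_eta" } are not used.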
if (diff.size() != 0) {
std::stringstream ss;
ss << "\nParameters: { ";
for (size_t i = 0; i < diff.size() - 1; ++i) {
ss << "\"" << diff[i] << "\", ";
}
ss << "\"" << diff.back() << "\"";
ss << R"W( } are not used.
)W";
LOG(WARNING) << ss.str();
}
}
void ConfigureNumFeatures() {
// Compute number of global features if parameter not already set
if (mparam_.num_feature == 0) {
// TODO(hcho3): Change num_feature to 64-bit integer
unsigned num_feature = 0;
for (auto const& matrix : prediction_container_.Container()) {
CHECK(matrix.first.ptr);
CHECK(!matrix.second.ref.expired());
const uint64_t num_col = matrix.first.ptr->Info().num_col_;
error::MaxFeatureSize(num_col);
num_feature = std::max(num_feature, static_cast<uint32_t>(num_col));
}
auto rc =
collective::Allreduce(&ctx_, linalg::MakeVec(&num_feature, 1), collective::Op::kMax);
collective::SafeColl(rc);
if (num_feature > mparam_.num_feature) {
mparam_.num_feature = num_feature;
}
}
CHECK_NE(mparam_.num_feature, 0)
<< "0 features were supplied. Are you using the raw Booster interface?";
}
void ConfigureGBM(LearnerTrainParam const& old, Args const& args) {
if (gbm_ == nullptr || old.booster != tparam_.booster) {
gbm_.reset(GradientBooster::Create(tparam_.booster, &ctx_,
&learner_model_param_));
}
gbm_->Configure(args);
}
void ConfigureObjective(LearnerTrainParam const& old, Args* p_args) {
// Once binary IO is gone, none of these configurations will be useful.
if (cfg_.find("num_class") != cfg_.cend() && cfg_.at("num_class") != "0" &&
tparam_.objective != "multi:softprob") {
cfg_["num_output_group"] = cfg_["num_class"];
if (atoi(cfg_["num_class"].c_str()) > 1 && cfg_.count("objective") == 0) {
tparam_.objective = "multi:softmax";
}
}
if (cfg_.find("max_delta_step") == cfg_.cend() &&
cfg_.find("objective") != cfg_.cend() &&
tparam_.objective == "count:poisson") {
// max_delta_step is a duplicated parameter in Poisson regression and tree param.
// Rename one of them once binary IO is gone.
cfg_["max_delta_step"] = kMaxDeltaStepDefaultValue;
}
if (obj_ == nullptr || tparam_.objective != old.objective) {
obj_.reset(ObjFunction::Create(tparam_.objective, &ctx_));
}
bool has_nc {cfg_.find("num_class") != cfg_.cend()};
// Inject num_class into configuration.
// FIXME(jiamingy): Remove the duplicated parameter in softmax
cfg_["num_class"] = std::to_string(mparam_.num_class);
auto& args = *p_args;
args = {cfg_.cbegin(), cfg_.cend()}; // renew
obj_->Configure(args);
if (!has_nc) {
cfg_.erase("num_class");
}
}
void ConfigureMetrics(Args const& args) {
for (auto const& name : metric_names_) {
auto DupCheck = [&name](std::unique_ptr<Metric> const& m) { return m->Name() != name; };
if (std::all_of(metrics_.begin(), metrics_.end(), DupCheck)) {
metrics_.emplace_back(std::unique_ptr<Metric>(Metric::Create(name, &ctx_)));
mparam_.contain_eval_metrics = 1;
}
}
for (auto& p_metric : metrics_) {
p_metric->Configure(args);
}
}
/**
* Get number of targets from objective function.
*/
void ConfigureTargets() {
CHECK(this->obj_);
auto const& cache = prediction_container_.Container();
bst_target_t n_targets = 1;
for (auto const& d : cache) {
if (n_targets == 1) {
n_targets = this->obj_->Targets(d.first.ptr->Info());
} else {
auto t = this->obj_->Targets(d.first.ptr->Info());
CHECK(n_targets == t || 1 == t) << "Inconsistent labels.";
}
}
if (mparam_.num_target > 1) {
CHECK(n_targets == 1 || n_targets == mparam_.num_target)
<< "Inconsistent configuration of num_target. Configuration from input data: "
<< n_targets << ", configuration from parameter: " << mparam_.num_target;
} else {
mparam_.num_target = n_targets;
}
}
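// The estimate itself is delegated to the objective; ApplyWithLabels runs the
// label-dependent computation and synchronises the result so it stays consistent
// across distributed workers.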
void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_score) {
base_score->Reshape(1);
collective::ApplyWithLabels(this->Ctx(), info, base_score->Data(),
[&] { UsePtr(obj_)->InitEstimation(info, base_score); });
}
};
std::string const LearnerConfiguration::kEvalMetric {"eval_metric"}; // NOLINT
class LearnerIO : public LearnerConfiguration {
private:
// Used to identify the offset of the JSON configuration string in the serialised model.
// Will be removed once JSON takes over. Right now we still load some RDS files from R.
std::string const serialisation_header_ { u8"CONFIG-offset:" };
void ClearCaches() { this->prediction_container_ = PredictionContainer{}; }
public:
explicit LearnerIO(std::vector<std::shared_ptr<DMatrix>> cache) : LearnerConfiguration{cache} {}
void LoadModel(Json const& in) override {
CHECK(IsA<Object>(in));
auto version = Version::Load(in);
if (std::get<0>(version) == 1 && std::get<1>(version) < 6) {
LOG(WARNING)
<< "Found a JSON model saved before XGBoost 1.6; please save the model again using "
"the current version. Support for the old JSON model will be discontinued in "
"XGBoost 2.3.";
}
auto const& learner = get<Object>(in["learner"]);
mparam_.FromJson(learner.at("learner_model_param"));
auto const& objective_fn = learner.at("objective");
std::string name = get<String>(objective_fn["name"]);
tparam_.UpdateAllowUnknown(Args{{"objective", name}});
obj_.reset(ObjFunction::Create(name, &ctx_));
obj_->LoadConfig(objective_fn);
auto const& gradient_booster = learner.at("gradient_booster");
name = get<String>(gradient_booster["name"]);
tparam_.UpdateAllowUnknown(Args{{"booster", name}});
gbm_.reset(
GradientBooster::Create(tparam_.booster, &ctx_, &learner_model_param_));
gbm_->LoadModel(gradient_booster);
auto const& j_attributes = get<Object const>(learner.at("attributes"));
attributes_.clear();
for (auto const& kv : j_attributes) {
attributes_[kv.first] = get<String const>(kv.second);
}
// Feature names and types have been saved since XGBoost 1.4.
auto it = learner.find("feature_names");
if (it != learner.cend()) {
auto const& feature_names = get<Array const>(it->second);
feature_names_.resize(feature_names.size());
std::transform(feature_names.cbegin(), feature_names.cend(), feature_names_.begin(),
[](Json const& fn) { return get<String const>(fn); });
}
it = learner.find("feature_types");
if (it != learner.cend()) {
auto const& feature_types = get<Array const>(it->second);
feature_types_.resize(feature_types.size());
std::transform(feature_types.cbegin(), feature_types.cend(), feature_types_.begin(),
[](Json const& fn) { return get<String const>(fn); });
}
this->need_configuration_ = true;
this->ClearCaches();
}
void SaveModel(Json* p_out) const override {
CHECK(!this->need_configuration_) << "Call Configure before saving model.";
this->CheckModelInitialized();
Version::Save(p_out);
Json& out { *p_out };
out["learner"] = Object();
auto& learner = out["learner"];
learner["learner_model_param"] = mparam_.ToJson();
learner["gradient_booster"] = Object();
auto& gradient_booster = learner["gradient_booster"];
gbm_->SaveModel(&gradient_booster);
learner["objective"] = Object();
auto& objective_fn = learner["objective"];
obj_->SaveConfig(&objective_fn);
learner["attributes"] = Object();
for (auto const& kv : attributes_) {
learner["attributes"][kv.first] = String(kv.second);
}
learner["feature_names"] = Array();
auto& feature_names = get<Array>(learner["feature_names"]);
for (auto const& name : feature_names_) {
feature_names.emplace_back(name);
}
learner["feature_types"] = Array();
auto& feature_types = get<Array>(learner["feature_types"]);
for (auto const& type : feature_types_) {
feature_types.emplace_back(type);
}
}
// About to be deprecated by JSON format
void LoadModel(dmlc::Stream* fi) override {
ctx_.UpdateAllowUnknown(Args{});
tparam_.Init(std::vector<std::pair<std::string, std::string>>{});
// TODO(tqchen) mark deprecation of old format.
common::PeekableInStream fp(fi);
// backward compatible header check.
std::string header;
header.resize(4);
if (fp.PeekRead(&header[0], 4) == 4) {
CHECK_NE(header, "bs64")
<< "Base64 format is no longer supported.";
if (header == "binf") {
CHECK_EQ(fp.Read(&header[0], 4), 4U);
}
}
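// At this point `header` holds the first four bytes of the stream: "bs64" was
// rejected above, a "binf" prefix has been consumed, '{' dispatches to JSON below,
// and anything else falls through to the legacy binary format.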
// FIXME(jiamingy): Move this out of the learner after the old binary model is removed.
auto first_non_space = [&](std::string::const_iterator beg, std::string::const_iterator end) {
for (auto i = beg; i != end; ++i) {
if (!std::isspace(*i)) {
return i;
}
}
return end;
};
if (header[0] == '{') { // Dispatch to JSON
auto buffer = common::ReadAll(fi, &fp);
Json model;
auto it = first_non_space(buffer.cbegin() + 1, buffer.cend());
if (it != buffer.cend() && *it == '"') {
model = Json::Load(StringView{buffer});
} else if (it != buffer.cend() && std::isalpha(*it)) {
model = Json::Load(StringView{buffer}, std::ios::binary);
} else {
LOG(FATAL) << "Invalid model format";
}
this->LoadModel(model);
return;
}
// use the peekable reader.
fi = &fp;