Revert "fp32 fix for objectives calculations" #73

Merged · 1 commit · Mar 10, 2025
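This revert restores the original `common::Transform<>` functor signature: the compile-time `has_fp64_support` tag (a `std::true_type`/`std::false_type` value threaded through the CUDA, SYCL, and CPU launchers and every objective lambda) is dropped, so functors again receive only the element index and the data spans. A minimal before/after sketch of the two shapes, assuming a hypothetical `Square` functor and raw pointers in place of `common::Span`:

```cpp
#include <cstddef>

// Shape required by the change being reverted: a trailing tag argument let the
// body branch on fp64 support via `if constexpr`.
struct SquareTagged {
  template <class HasFp64>
  void operator()(std::size_t i, HasFp64, float* out, const float* in) const {
    out[i] = in[i] * in[i];
  }
};

// Shape restored by this revert: index plus the data arguments only.
struct Square {
  void operator()(std::size_t i, float* out, const float* in) const {
    out[i] = in[i] * in[i];
  }
};
```

With the reverted launchers below, only the second shape is expected, which is why every lambda in the objectives loses its `auto has_fp64_support` parameter.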
25 changes: 7 additions & 18 deletions plugin/sycl/common/transform.h
@@ -20,24 +20,13 @@ void LaunchSyclKernel(DeviceOrd device, Functor&& _func, xgboost::common::Range
auto* qu = device_manager.GetQueue(device);

size_t size = *(_range.end());
- const bool has_fp64_support = qu->get_device().has(::sycl::aspect::fp64);
- if (has_fp64_support) {
- qu->submit([&](::sycl::handler& cgh) {
- cgh.parallel_for<>(::sycl::range<1>(size),
- [=](::sycl::id<1> pid) {
- const size_t idx = pid[0];
- const_cast<Functor&&>(_func)(idx, std::true_type(), _spans...);
- });
- }).wait();
- } else {
- qu->submit([&](::sycl::handler& cgh) {
- cgh.parallel_for<>(::sycl::range<1>(size),
- [=](::sycl::id<1> pid) {
- const size_t idx = pid[0];
- const_cast<Functor&&>(_func)(idx, std::false_type(), _spans...);
- });
- }).wait();
- }
+ qu->submit([&](::sycl::handler& cgh) {
+ cgh.parallel_for<>(::sycl::range<1>(size),
+ [=](::sycl::id<1> pid) {
+ const size_t idx = pid[0];
+ const_cast<Functor&&>(_func)(idx, _spans...);
+ });
+ }).wait();
}

} // namespace common
5 changes: 2 additions & 3 deletions src/common/transform.h
@@ -37,7 +37,7 @@ template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
SpanType... _spans) {
for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
- _func(i, std::true_type(), _spans...);
+ _func(i, _spans...);
}
}
#endif // defined(__CUDACC__)
@@ -184,8 +184,7 @@ class Transform {
void LaunchCPU(Functor func, HDV *...vectors) const {
omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
SyncHost(vectors...);
- ParallelFor(end, n_threads_, [&](omp_ulong idx) { func(idx, std::true_type(),
- UnpackHDV(vectors)...); });
+ ParallelFor(end, n_threads_, [&](omp_ulong idx) { func(idx, UnpackHDV(vectors)...); });
}

private:
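The restored `LaunchCPU` body forwards the element index plus each unpacked vector straight into the functor. A self-contained sketch of that variadic forwarding, with `std::vector` standing in for `HostDeviceVector`/`Span` and a made-up `LaunchSequential` helper in place of `ParallelFor`:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal stand-in for the dispatch in Transform::LaunchCPU: walk the index
// range and forward (idx, containers...) to the functor.
template <typename Functor, typename... Vec>
void LaunchSequential(std::size_t n, Functor func, Vec&... vectors) {
  for (std::size_t idx = 0; idx < n; ++idx) {
    func(idx, vectors...);  // signature restored by this revert: no tag argument
  }
}

int main() {
  std::vector<float> in{1.f, 2.f, 3.f};
  std::vector<float> out(in.size());
  LaunchSequential(in.size(),
                   [](std::size_t i, std::vector<float>& o, const std::vector<float>& v) {
                     o[i] = v[i] * v[i];
                   },
                   out, in);
  std::cout << out[2] << "\n";  // prints 9
}
```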
4 changes: 2 additions & 2 deletions src/objective/aft_obj.cu
@@ -45,7 +45,7 @@ class AFTObj : public ObjFunction {
linalg::Matrix<GradientPair>* out_gpair, size_t ndata, DeviceOrd device,
bool is_null_weight, float aft_loss_distribution_scale) {
common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support,
+ [=] XGBOOST_DEVICE(size_t _idx,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels_lower_bound,
@@ -104,7 +104,7 @@ class AFTObj : public ObjFunction {
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
// Trees give us a prediction in log scale, so exponentiate
common::Transform<>::Init(
- [] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support, common::Span<bst_float> _preds) {
+ [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = exp(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
2 changes: 1 addition & 1 deletion src/objective/hinge.cu
@@ -85,7 +85,7 @@ class HingeObj : public FitIntercept {

void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
- [] XGBOOST_DEVICE(std::size_t _idx, auto has_fp64_support, common::Span<float> _preds) {
+ [] XGBOOST_DEVICE(std::size_t _idx, common::Span<float> _preds) {
_preds[_idx] = _preds[_idx] > 0.0 ? 1.0 : 0.0;
},
common::Range{0, static_cast<int64_t>(io_preds->Size()), 1}, this->ctx_->Threads(),
25 changes: 8 additions & 17 deletions src/objective/multiclass_obj.cu
@@ -75,7 +75,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
}

common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t idx, auto has_fp64_support,
+ [=] XGBOOST_DEVICE(size_t idx,
common::Span<GradientPair> gpair,
common::Span<bst_float const> labels,
common::Span<bst_float const> preds,
@@ -86,16 +86,8 @@
// Part of Softmax function
bst_float wmax = std::numeric_limits<bst_float>::min();
for (auto const i : point) { wmax = fmaxf(i, wmax); }

- float wsum = 0.0f;
- if constexpr (has_fp64_support) {
- double wsum_fp64 = 0;
- for (auto const i : point) { wsum_fp64 += expf(i - wmax); }
- wsum = static_cast<float>(wsum_fp64);
- } else {
- for (auto const i : point) { wsum += expf(i - wmax); }
- }
-
+ double wsum = 0.0f;
+ for (auto const i : point) { wsum += expf(i - wmax); }
auto label = labels[idx];
if (label < 0 || label >= nclass) {
_label_correct[0] = 0;
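The block removed above is the heart of the reverted change: the `has_fp64_support` tag selected, at compile time, whether the softmax normaliser is accumulated in `double` or `float`. A self-contained sketch of that tag-dispatch pattern, with names chosen for illustration rather than taken from XGBoost:

```cpp
#include <cmath>
#include <iostream>
#include <type_traits>
#include <vector>

// The tag's type, not its value, picks the accumulator width at compile time,
// mirroring the removed has_fp64_support branch.
template <typename HasFp64>
float SumExp(const std::vector<float>& v, float vmax, HasFp64) {
  if constexpr (HasFp64::value) {
    double acc = 0.0;  // wide accumulator when fp64 is available
    for (float x : v) acc += std::exp(x - vmax);
    return static_cast<float>(acc);
  } else {
    float acc = 0.0f;  // stay in fp32 otherwise
    for (float x : v) acc += std::exp(x - vmax);
    return acc;
  }
}

int main() {
  std::vector<float> v{0.1f, 0.2f, 0.3f};
  std::cout << SumExp(v, 0.3f, std::true_type{}) << " "
            << SumExp(v, 0.3f, std::false_type{}) << "\n";
}
```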
@@ -104,11 +96,11 @@
bst_float wt = is_null_weight ? 1.0f : weights[idx];
for (int k = 0; k < nclass; ++k) {
// Computation duplicated to avoid creating a cache.
- bst_float p = expf(point[k] - wmax) / wsum;
+ bst_float p = expf(point[k] - wmax) / static_cast<float>(wsum);
const float eps = 1e-16f;
- const bst_float h = 2.0f * p * (1.0f - p) * wt;
+ const bst_float h = fmax(2.0f * p * (1.0f - p) * wt, eps);
p = label == k ? p - 1.0f : p;
- gpair[idx * nclass + k] = GradientPair(p * wt, h < eps ? eps : h);
+ gpair[idx * nclass + k] = GradientPair(p * wt, h);
}
}, common::Range{0, ndata}, ctx_->Threads(), device)
.Eval(out_gpair->Data(), info.labels.Data(), &preds, &info.weights_, &label_correct_);
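For reference, the restored hunk computes, per row, the softmax probability `p_k = expf(x_k - wmax) / wsum` with the normaliser accumulated in `double`, the gradient `p_k - 1` for the true class (otherwise `p_k`) scaled by the instance weight, and a Hessian floored at `eps`. A standalone sketch of that per-row computation, with the function name and vector outputs chosen for illustration:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// Per-row softmax gradient/Hessian as in the lambda above; `point` holds one
// row of raw scores, `wt` the instance weight. grad/hess must be pre-sized to
// point.size(). Illustrative only, not XGBoost API.
void SoftmaxGradHess(const std::vector<float>& point, int label, float wt,
                     std::vector<float>* grad, std::vector<float>* hess) {
  float wmax = std::numeric_limits<float>::min();  // same initialisation as above
  for (float v : point) wmax = std::max(wmax, v);
  double wsum = 0.0;  // normaliser accumulated in fp64, as in the restored code
  for (float v : point) wsum += std::exp(v - wmax);
  const float eps = 1e-16f;
  for (std::size_t k = 0; k < point.size(); ++k) {
    float p = std::exp(point[k] - wmax) / static_cast<float>(wsum);
    const float h = std::max(2.0f * p * (1.0f - p) * wt, eps);  // floored Hessian
    p = static_cast<int>(k) == label ? p - 1.0f : p;
    (*grad)[k] = p * wt;
    (*hess)[k] = h;
  }
}
```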
@@ -137,7 +129,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
auto device = io_preds->Device();
if (prob) {
common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support, common::Span<bst_float> _preds) {
+ [=] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
common::Span<bst_float> point =
_preds.subspan(_idx * nclass, nclass);
common::Softmax(point.begin(), point.end());
@@ -150,8 +142,7 @@
max_preds.SetDevice(device);
max_preds.Resize(ndata);
common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support,
- common::Span<const bst_float> _preds,
+ [=] XGBOOST_DEVICE(size_t _idx, common::Span<const bst_float> _preds,
common::Span<bst_float> _max_preds) {
common::Span<const bst_float> point =
_preds.subspan(_idx * nclass, nclass);
13 changes: 6 additions & 7 deletions src/objective/regression_obj.cu
@@ -142,8 +142,7 @@ class RegLossObj : public FitInterceptGlmLike {

common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
- size_t data_block_idx, auto has_fp64_support,
- common::Span<float> _additional_input,
+ size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
@@ -180,7 +179,7 @@ class RegLossObj : public FitInterceptGlmLike {

void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
- [] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support, common::Span<float> _preds) {
+ [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
@@ -361,7 +360,7 @@ class PoissonRegression : public FitInterceptGlmLike {
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support,
+ [=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
@@ -388,7 +387,7 @@
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
- [] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support, common::Span<bst_float> _preds) {
+ [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
@@ -567,7 +566,7 @@ class TweedieRegression : public FitInterceptGlmLike {

const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support,
+ [=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
@@ -598,7 +597,7 @@
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
- [] XGBOOST_DEVICE(size_t _idx, auto has_fp64_support, common::Span<bst_float> _preds) {
+ [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
3 changes: 1 addition & 2 deletions src/tree/split_evaluator.h
@@ -180,8 +180,7 @@ class TreeEvaluator {
}

common::Transform<>::Init(
- [=] XGBOOST_DEVICE(size_t, auto has_fp64_support,
- common::Span<float> lower,
+ [=] XGBOOST_DEVICE(size_t, common::Span<float> lower,
common::Span<float> upper,
common::Span<int> monotone) {
lower[leftid] = lower[nodeid];
5 changes: 2 additions & 3 deletions tests/cpp/common/test_transform_range.cc
@@ -25,8 +25,7 @@ constexpr DeviceOrd TransformDevice() {

template <typename T>
struct TestTransformRange {
- template <class kBoolConst>
- void XGBOOST_DEVICE operator()(std::size_t _idx, kBoolConst has_fp64_support, Span<float> _out, Span<const float> _in) {
+ void XGBOOST_DEVICE operator()(std::size_t _idx, Span<float> _out, Span<const float> _in) {
_out[_idx] = _in[_idx];
}
};
@@ -60,7 +59,7 @@ TEST(TransformDeathTest, Exception) {
const HostDeviceVector<float> in_vec{h_in, DeviceOrd::CPU()};
EXPECT_DEATH(
{
- Transform<>::Init([](size_t idx, auto has_fp64_support, common::Span<float const> _in) { _in[idx + 1]; },
+ Transform<>::Init([](size_t idx, common::Span<float const> _in) { _in[idx + 1]; },
Range(0, static_cast<Range::DifferenceType>(kSize)), AllThreadsForTest(),
DeviceOrd::CPU())
.Eval(&in_vec);
3 changes: 1 addition & 2 deletions tests/cpp/plugin/test_sycl_transform_range.cc
@@ -19,8 +19,7 @@ namespace xgboost::common {

template <typename T>
struct TestTransformRange {
- template <class kBoolConst>
- void operator()(std::size_t _idx, kBoolConst has_fp64_support, Span<float> _out, Span<const float> _in) {
+ void operator()(std::size_t _idx, Span<float> _out, Span<const float> _in) {
_out[_idx] = _in[_idx];
}
};