Unify test helpers for creating ctx. (#9274)
trivialfis authored Jun 9, 2023
1 parent ea0deec commit 152e2fb
Showing 36 changed files with 161 additions and 169 deletions.
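The change follows one pattern across the touched files: CPU-only tests default-construct a Context, while GPU tests call MakeCUDACtx. Below is a minimal sketch of the converted call-site idioms — a hypothetical helper, not part of the diff; MakeCUDACtx and GPUIDX (the build-dependent device ordinal used by the linear and metric tests) are taken from the hunks that follow, and the include paths mirror the sub-directory tests.

#include "../helpers.h"       // for MakeCUDACtx, GPUIDX
#include "xgboost/context.h"  // for Context

namespace xgboost {
// Illustrative only: the three idioms the call sites in this commit converge on.
inline void CtxIdiomsSketch() {
  Context cpu_ctx;                        // CPU tests: default construction is enough
  Context gpu_ctx = MakeCUDACtx(0);       // CUDA tests: target device 0 explicitly
  Context any_ctx = MakeCUDACtx(GPUIDX);  // shared CPU/GPU tests: ordinal depends on the build
  (void)cpu_ctx;
  (void)gpu_ctx;
  (void)any_ctx;
}
}  // namespace xgboost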
4 changes: 2 additions & 2 deletions tests/cpp/common/test_algorithm.cu
@@ -11,7 +11,7 @@

#include "../../../src/common/algorithm.cuh"
#include "../../../src/common/device_helpers.cuh"
#include "../helpers.h" // CreateEmptyGenericParam
#include "../helpers.h" // MakeCUDACtx

namespace xgboost {
namespace common {
@@ -83,7 +83,7 @@ TEST(Algorithm, GpuArgSort) {
TEST(Algorithm, SegmentedSequence) {
dh::device_vector<std::size_t> idx(16);
dh::device_vector<std::size_t> ptr(3);
Context ctx = CreateEmptyGenericParam(0);
Context ctx = MakeCUDACtx(0);
ptr[0] = 0;
ptr[1] = 4;
ptr[2] = idx.size();
6 changes: 3 additions & 3 deletions tests/cpp/common/test_column_matrix.cc
@@ -14,7 +14,7 @@ TEST(DenseColumn, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
BinTypeSize last{kUint8BinsTypeSize};
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 10, 0.0).GenerateDMatrix();
@@ -63,7 +63,7 @@ TEST(SparseColumn, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 1, 0.85).GenerateDMatrix();
GHistIndexMatrix gmat{&ctx, dmat.get(), max_num_bin, 0.5f, false};
@@ -92,7 +92,7 @@ TEST(DenseColumnWithMissing, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 1, 0.5).GenerateDMatrix();
GHistIndexMatrix gmat(&ctx, dmat.get(), max_num_bin, 0.2, false);
52 changes: 26 additions & 26 deletions tests/cpp/common/test_hist_util.cc
@@ -156,28 +156,28 @@ TEST(CutsBuilder, SearchGroupInd) {
}

TEST(HistUtil, DenseCutsCategorical) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
std::vector<float> x_sorted(x);
std::sort(x_sorted.begin(), x_sorted.end());
auto dmat = GetDMatrixFromData(x, n, 1);
HistogramCuts cuts = SketchOnDMatrix(&ctx, dmat.get(), num_bins);
auto cuts_from_sketch = cuts.Values();
EXPECT_LT(cuts.MinValues()[0], x_sorted.front());
EXPECT_GT(cuts_from_sketch.front(), x_sorted.front());
EXPECT_GE(cuts_from_sketch.back(), x_sorted.back());
EXPECT_EQ(cuts_from_sketch.size(), static_cast<size_t>(num_categories));
}
}
Context ctx;
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
std::vector<float> x_sorted(x);
std::sort(x_sorted.begin(), x_sorted.end());
auto dmat = GetDMatrixFromData(x, n, 1);
HistogramCuts cuts = SketchOnDMatrix(&ctx, dmat.get(), num_bins);
auto cuts_from_sketch = cuts.Values();
EXPECT_LT(cuts.MinValues()[0], x_sorted.front());
EXPECT_GT(cuts_from_sketch.front(), x_sorted.front());
EXPECT_GE(cuts_from_sketch.back(), x_sorted.back());
EXPECT_EQ(cuts_from_sketch.size(), static_cast<size_t>(num_categories));
}
}
}

TEST(HistUtil, DenseCutsAccuracyTest) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100};
int num_columns = 5;
@@ -195,7 +195,7 @@ TEST(HistUtil, DenseCutsAccuracyTestWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
Expand All @@ -218,7 +218,7 @@ void TestQuantileWithHessian(bool use_sorted) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
@@ -257,7 +257,7 @@ TEST(HistUtil, DenseCutsExternalMemory) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory tmpdir;
@@ -278,7 +278,7 @@ TEST(HistUtil, IndexBinBound) {
kUint32BinsTypeSize};
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
size_t bin_id = 0;
for (auto max_bin : bin_sizes) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
@@ -303,7 +303,7 @@ TEST(HistUtil, IndexBinData) {
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;

for (auto max_bin : kBinSizes) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
@@ -331,7 +331,7 @@ void TestSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateDMatrix();
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
common::HistogramCuts cuts = SketchOnDMatrix(&ctx, m.get(), kBins);

MetaInfo info;
@@ -397,7 +397,7 @@ TEST(HistUtil, SketchFromWeights) {
}

TEST(HistUtil, SketchCategoricalFeatures) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
TestCategoricalSketch(1000, 256, 32, false, [&ctx](DMatrix* p_fmat, int32_t num_bins) {
return SketchOnDMatrix(&ctx, p_fmat, num_bins);
});
2 changes: 1 addition & 1 deletion tests/cpp/common/test_hist_util.cu
@@ -310,7 +310,7 @@ TEST(HistUtil, AdapterDeviceSketch) {
data::CupyAdapter adapter(str);

auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing);

EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
2 changes: 1 addition & 1 deletion tests/cpp/common/test_quantile.cc
@@ -302,7 +302,7 @@ namespace {
void TestSameOnAllWorkers() {
auto const world = collective::GetWorldSize();
constexpr size_t kRows = 1000, kCols = 100;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;

RunWithSeedsAndBins(
kRows, [=, &ctx](int32_t seed, size_t n_bins, MetaInfo const&) {
6 changes: 3 additions & 3 deletions tests/cpp/data/test_gradient_index.cc
@@ -21,15 +21,15 @@
#include "../../../src/data/adapter.h" // for SparsePageAdapterBatch
#include "../../../src/data/gradient_index.h" // for GHistIndexMatrix
#include "../../../src/tree/param.h" // for TrainParam
#include "../helpers.h" // for CreateEmptyGenericParam, GenerateRandomCa...
#include "../helpers.h" // for GenerateRandomCategoricalSingleColumn...
#include "xgboost/base.h" // for bst_bin_t
#include "xgboost/context.h" // for Context
#include "xgboost/host_device_vector.h" // for HostDeviceVector

namespace xgboost {
namespace data {
TEST(GradientIndex, ExternalMemory) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(10000);
std::vector<size_t> base_rowids;
std::vector<float> hessian(dmat->Info().num_row_, 1);
@@ -58,7 +58,7 @@ TEST(GradientIndex, FromCategoricalBasic) {
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;

auto &h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);
8 changes: 4 additions & 4 deletions tests/cpp/data/test_sparse_page_dmatrix.cc
@@ -67,7 +67,7 @@ void TestSparseDMatrixLoadFile(Context const* ctx) {
}

TEST(SparsePageDMatrix, LoadFile) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
TestSparseDMatrixLoadFile<SparsePage>(&ctx);
TestSparseDMatrixLoadFile<CSCPage>(&ctx);
TestSparseDMatrixLoadFile<SortedCSCPage>(&ctx);
@@ -77,7 +77,7 @@ TEST(SparsePageDMatrix, LoadFile) {
template <typename Page>
void TestRetainPage() {
auto m = CreateSparsePageDMatrix(10000);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto batches = m->GetBatches<Page>(&ctx);
auto begin = batches.begin();
auto end = batches.end();
@@ -145,7 +145,7 @@ TEST(SparsePageDMatrix, ColAccess) {
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateSimpleTestData(tmp_file);
xgboost::DMatrix *dmat = xgboost::DMatrix::Load(UriSVM(tmp_file, tmp_file));
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;

// Loop over the batches and assert the data is as expected
size_t iter = 0;
@@ -224,7 +224,7 @@ TEST(SparsePageDMatrix, ColAccessBatches) {
// Create multiple sparse pages
std::unique_ptr<xgboost::DMatrix> dmat{xgboost::CreateSparsePageDMatrix(kEntries)};
ASSERT_EQ(dmat->Ctx()->Threads(), AllThreadsForTest());
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto const &page : dmat->GetBatches<xgboost::CSCPage>(&ctx)) {
ASSERT_EQ(dmat->Info().num_col_, page.Size());
}
2 changes: 1 addition & 1 deletion tests/cpp/data/test_sparse_page_dmatrix.cu
@@ -108,7 +108,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {
}

TEST(SparsePageDMatrix, EllpackPageContent) {
auto ctx = CreateEmptyGenericParam(0);
auto ctx = MakeCUDACtx(0);
constexpr size_t kRows = 6;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
16 changes: 7 additions & 9 deletions tests/cpp/helpers.h
@@ -382,13 +382,6 @@ std::unique_ptr<GradientBooster> CreateTrainedGBM(std::string name, Args kwargs,
LearnerModelParam const* learner_model_param,
Context const* generic_param);

inline Context CreateEmptyGenericParam(int gpu_id) {
xgboost::Context tparam;
std::vector<std::pair<std::string, std::string>> args{{"gpu_id", std::to_string(gpu_id)}};
tparam.Init(args);
return tparam;
}

inline std::unique_ptr<HostDeviceVector<GradientPair>> GenerateGradients(
std::size_t rows, bst_target_t n_targets = 1) {
auto p_gradients = std::make_unique<HostDeviceVector<GradientPair>>(rows * n_targets);
@@ -407,9 +400,14 @@ inline std::unique_ptr<HostDeviceVector<GradientPair>> GenerateGradients(
}

/**
* \brief Make a context that uses CUDA.
* \brief Make a context that uses CUDA if device >= 0.
*/
inline Context MakeCUDACtx(std::int32_t device) { return Context{}.MakeCUDA(device); }
inline Context MakeCUDACtx(std::int32_t device) {
if (device == Context::kCpuId) {
return Context{};
}
return Context{}.MakeCUDA(device);
}

inline HostDeviceVector<GradientPair> GenerateRandomGradients(const size_t n_rows,
float lower= 0.0f, float upper = 1.0f) {
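With CreateEmptyGenericParam gone, MakeCUDACtx is the single entry point, and the new early return means a negative ordinal (Context::kCpuId) now yields a default CPU context instead of calling MakeCUDA(-1). Below is a hedged sanity check of the two branches — the test name is made up, and gpu_id is the field the removed helper used to set via "gpu_id" in its args.

#include <gtest/gtest.h>

#include "../helpers.h"       // for MakeCUDACtx
#include "xgboost/context.h"  // for Context

namespace xgboost {
// Illustrative only: exercises both branches of the unified helper.
TEST(Helpers, MakeCUDACtxBranches) {
  Context cpu = MakeCUDACtx(Context::kCpuId);  // early-return branch: plain CPU context
  ASSERT_EQ(cpu.gpu_id, Context::kCpuId);
  Context cuda = MakeCUDACtx(0);               // MakeCUDA branch: CUDA device 0
  ASSERT_EQ(cuda.gpu_id, 0);
}
}  // namespace xgboost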
10 changes: 5 additions & 5 deletions tests/cpp/linear/test_json_io.h
@@ -12,19 +12,19 @@

namespace xgboost {
inline void TestUpdaterJsonIO(std::string updater_str) {
auto runtime = xgboost::CreateEmptyGenericParam(GPUIDX);
Context ctx{MakeCUDACtx(GPUIDX)};
Json config_0 {Object() };

{
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create(updater_str, &runtime));
auto updater =
std::unique_ptr<xgboost::LinearUpdater>(xgboost::LinearUpdater::Create(updater_str, &ctx));
updater->Configure({{"eta", std::to_string(3.14)}});
updater->SaveConfig(&config_0);
}

{
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create(updater_str, &runtime));
auto updater =
std::unique_ptr<xgboost::LinearUpdater>(xgboost::LinearUpdater::Create(updater_str, &ctx));
updater->LoadConfig(config_0);
Json config_1 { Object() };
updater->SaveConfig(&config_1);
4 changes: 2 additions & 2 deletions tests/cpp/linear/test_linear.cc
@@ -17,7 +17,7 @@ TEST(Linear, Shotgun) {

auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();

auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
LearnerModelParam mparam{MakeMP(kCols, .5, 1)};

{
@@ -49,7 +49,7 @@ TEST(Linear, coordinate) {

auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();

auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
LearnerModelParam mparam{MakeMP(kCols, .5, 1)};

auto updater = std::unique_ptr<xgboost::LinearUpdater>(
2 changes: 1 addition & 1 deletion tests/cpp/linear/test_linear.cu
@@ -13,7 +13,7 @@ TEST(Linear, GPUCoordinate) {
size_t constexpr kCols = 10;

auto mat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(0);

LearnerModelParam mparam{MakeMP(kCols, .5, 1)};
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
12 changes: 6 additions & 6 deletions tests/cpp/metric/test_auc.h
@@ -11,7 +11,7 @@ namespace xgboost {
namespace metric {

inline void VerifyBinaryAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> uni_ptr{Metric::Create("auc", &ctx)};
Metric* metric = uni_ptr.get();
ASSERT_STREQ(metric->Name(), "auc");
@@ -54,7 +54,7 @@ inline void VerifyBinaryAUC(DataSplitMode data_split_mode = DataSplitMode::kRow)
}

inline void VerifyMultiClassAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> uni_ptr{Metric::Create("auc", &ctx)};
auto metric = uni_ptr.get();

@@ -115,7 +115,7 @@ inline void VerifyMultiClassAUC(DataSplitMode data_split_mode = DataSplitMode::k
}

inline void VerifyRankingAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("auc", &ctx)};

// single group
@@ -149,7 +149,7 @@ inline void VerifyRankingAUC(DataSplitMode data_split_mode = DataSplitMode::kRow
}

inline void VerifyPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);

xgboost::Metric* metric = xgboost::Metric::Create("aucpr", &ctx);
ASSERT_STREQ(metric->Name(), "aucpr");
@@ -186,7 +186,7 @@ inline void VerifyPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}

inline void VerifyMultiClassPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);

std::unique_ptr<Metric> metric{Metric::Create("aucpr", &ctx)};

@@ -210,7 +210,7 @@ inline void VerifyMultiClassPRAUC(DataSplitMode data_split_mode = DataSplitMode:
}

inline void VerifyRankingPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);

std::unique_ptr<Metric> metric{Metric::Create("aucpr", &ctx)};

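These Verify* helpers are compiled into both the CPU and the CUDA metric tests, which is presumably why they take GPUIDX rather than a hard-coded ordinal: the macro is assumed to expand to 0 in CUDA translation units and to the CPU sentinel otherwise — exactly the case the new early return in MakeCUDACtx covers. A hypothetical reproduction of that convention, for reference only:

// Hypothetical reproduction of GPUIDX (the real macro ships with the test helpers):
// device 0 when compiled as CUDA, otherwise the CPU sentinel Context::kCpuId.
#if defined(__CUDACC__)
#define GPUIDX 0
#else
#define GPUIDX (-1)
#endif

// The same verifier body then serves both test_auc.cc and test_auc.cu, e.g.
//   auto ctx = MakeCUDACtx(GPUIDX);  // CPU context in .cc builds, CUDA device 0 under nvcc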