Work around issues with cccl main #15552

Merged: 49 commits, May 28, 2024

Changes shown below are from 2 of the 49 commits.

Commits
b74a236  Always use `cuda::proclaim_return_type` with device lambdas (miscco, Apr 17, 2024)
942291c  Work around issue with `thrust::pair` that prevents CTAD (miscco, Apr 17, 2024)
e172942  Apply suggestions from code review (bdice, Apr 17, 2024)
83bc096  Merge branch 'branch-24.06' into fix_cccl_compat (ttnghia, Apr 19, 2024)
32eab67  Merge branch 'fix_cccl_compat' of github.com:miscco/cudf into miscco-… (trxcllnt, Apr 22, 2024)
dec2915  Merge branch 'branch-24.06' of github.com:rapidsai/cudf into miscco-f… (trxcllnt, Apr 22, 2024)
6ca0c2a  fix lint (trxcllnt, Apr 22, 2024)
9b09a9e  Merge branch 'branch-24.06' of github.com:rapidsai/cudf into miscco-f… (trxcllnt, May 8, 2024)
c39db41  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 8, 2024)
4bda146  Merge branch 'fix_cccl_compat' of github.com:miscco/cudf into miscco-… (trxcllnt, May 8, 2024)
04f1990  use std::min instead of cuda::std::min (trxcllnt, May 8, 2024)
586f502  use cuda::proclaim_return_type (trxcllnt, May 8, 2024)
3598c8f  remove test for int16_t key type that's unsupported by cuda::atomic_ref (trxcllnt, May 8, 2024)
69796d5  regenerate and add patches that apply to CCCL main (trxcllnt, May 8, 2024)
8c43425  don't modify whitespace in patches (trxcllnt, May 8, 2024)
991c789  don't run clang-format on files in cpp/build/* (trxcllnt, May 8, 2024)
60aecd8  Merge branch 'branch-24.06' of github.com:rapidsai/cudf into fix/cccl… (trxcllnt, May 9, 2024)
9320968  update devcontainer workflow to use NVIDIA/cccl#pull-request/1667 (trxcllnt, May 9, 2024)
69bf346  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 13, 2024)
6a758bf  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 14, 2024)
bb67523  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 14, 2024)
4ff0c59  test rapids-cmake with CCCL 2.5 (trxcllnt, May 14, 2024)
f122905  pass cuco::cuda_stream_ref (trxcllnt, May 14, 2024)
434600e  revert changes to pr.yaml (trxcllnt, May 14, 2024)
75a1606  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 15, 2024)
9872c7c  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 15, 2024)
d85c763  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 15, 2024)
7c1abf8  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 16, 2024)
b14899f  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 21, 2024)
ffdab59  Update cpp/src/join/distinct_hash_join.cu (trxcllnt, May 21, 2024)
57d9eea  fix lint (trxcllnt, May 21, 2024)
7b0e75e  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 21, 2024)
9b5bc7a  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 21, 2024)
a011739  Apply suggestions from reviewers (trxcllnt, May 21, 2024)
78c1a89  revert more thrust::pair changes (trxcllnt, May 21, 2024)
68c5997  fix lint (trxcllnt, May 22, 2024)
a4e123a  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 22, 2024)
114db08  Apply suggestions from code review (trxcllnt, May 22, 2024)
64264a7  fix lint (trxcllnt, May 23, 2024)
48f22e2  Merge branch 'branch-24.06' of github.com:rapidsai/cudf into fix/cccl… (trxcllnt, May 23, 2024)
902f06b  include cuda/std/functional (trxcllnt, May 23, 2024)
3b29345  cuda::std::min -> std::min (trxcllnt, May 23, 2024)
264dda7  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 23, 2024)
3fce393  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 23, 2024)
d19d41d  Merge branch 'branch-24.06' into fix_cccl_compat (trxcllnt, May 24, 2024)
422538e  fix orc tests (trxcllnt, May 24, 2024)
774520c  Merge branch 'branch-24.06' of github.com:rapidsai/cudf into fix/cccl… (trxcllnt, May 24, 2024)
ef42695  compute and compare num_blocks instead of num_streams (trxcllnt, May 24, 2024)
d9a4947  revert changes to use rapids-cmake CCCL 2.5 branch (trxcllnt, May 24, 2024)

Files changed

6 changes: 3 additions & 3 deletions cpp/src/groupby/sort/group_rank_scan.cu

@@ -133,10 +133,10 @@ std::unique_ptr<column> rank_generator(column_view const& grouped_values,

   auto [group_labels_begin, mutable_rank_begin] = [&]() {
     if constexpr (forward) {
-      return thrust::pair{group_labels.begin(), mutable_ranks.begin<size_type>()};
+      return thrust::make_pair(group_labels.begin(), mutable_ranks.begin<size_type>());
     } else {
-      return thrust::pair{thrust::reverse_iterator(group_labels.end()),
-                          thrust::reverse_iterator(mutable_ranks.end<size_type>())};
+      return thrust::make_pair(thrust::reverse_iterator(group_labels.end()),
+                               thrust::reverse_iterator(mutable_ranks.end<size_type>()));
     }
   }();
   thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
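This hunk is the `thrust::pair` CTAD workaround from commit 942291c: building the pair with the `thrust::make_pair` factory lets the element types be deduced from the arguments, so no class template argument deduction on `thrust::pair` itself is needed. A minimal, self-contained sketch of the pattern (the function and parameter names are illustrative, not cudf code):

```cpp
// Sketch of the make_pair-instead-of-CTAD pattern; names are hypothetical.
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/pair.h>

template <bool forward, typename Iterator>
auto iteration_range(Iterator begin, Iterator end)
{
  if constexpr (forward) {
    // Previously: return thrust::pair{begin, end};  (relies on CTAD)
    return thrust::make_pair(begin, end);
  } else {
    // make_pair deduces the element types from its arguments, so the
    // reverse-iterator case needs no deduction guides either.
    return thrust::make_pair(thrust::make_reverse_iterator(end),
                             thrust::make_reverse_iterator(begin));
  }
}
```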
2 changes: 1 addition & 1 deletion cpp/src/io/comp/nvcomp_adapter.cu

@@ -108,7 +108,7 @@ void skip_unsupported_inputs(device_span<size_t> input_sizes,
     input_sizes.begin(),
     status_size_it,
     [] __device__(auto const& status) {
-      return thrust::pair{0, compression_result{0, compression_status::SKIPPED}};
+      return thrust::make_pair(0, compression_result{0, compression_status::SKIPPED});
     },
     [max_size = max_valid_input_size.value()] __device__(size_t input_size) {
       return input_size > max_size;
9 changes: 5 additions & 4 deletions cpp/src/io/comp/statistics.cu

@@ -18,6 +18,7 @@

 #include <rmm/exec_policy.hpp>

+#include <cuda/functional>
 #include <thrust/transform_reduce.h>

 namespace cudf::io {

@@ -32,9 +33,9 @@ writer_compression_statistics collect_compression_statistics(
     rmm::exec_policy(stream),
     results.begin(),
     results.end(),
-    [] __device__(auto& res) {
+    cuda::proclaim_return_type<size_t>([] __device__(const compression_result& res) {
       return res.status == compression_status::SUCCESS ? res.bytes_written : 0;
-    },
+    }),
     0ul,
     thrust::plus<size_t>());

@@ -47,9 +48,9 @@ writer_compression_statistics collect_compression_statistics(
     rmm::exec_policy(stream),
     zipped_begin,
     zipped_end,
-    [status] __device__(auto tup) {
+    cuda::proclaim_return_type<size_t>([status] __device__(auto tup) {
       return thrust::get<1>(tup).status == status ? thrust::get<0>(tup).size() : 0;
-    },
+    }),
     0ul,
     thrust::plus<size_t>());
   };
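`cuda::proclaim_return_type` (from `<cuda/functional>`, added to the includes above) wraps an extended `__device__` lambda and states its result type explicitly; this is the workaround commit b74a236 applies throughout the PR wherever Thrust would otherwise need to deduce the lambda's return type from host code. A rough sketch of the same shape of call, with a made-up `item` struct standing in for `compression_result`:

```cpp
// Sketch only: `item` and `total_bytes_written` are hypothetical stand-ins
// for the compression_result handling above, not cudf types or functions.
#include <cuda/functional>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cstddef>

struct item {
  bool ok;
  std::size_t bytes;
};

std::size_t total_bytes_written(thrust::device_vector<item> const& items)
{
  return thrust::transform_reduce(
    thrust::device,
    items.begin(),
    items.end(),
    // proclaim_return_type<size_t> declares the lambda's result type up front,
    // so Thrust never has to deduce it from the device lambda's body.
    cuda::proclaim_return_type<std::size_t>(
      [] __device__(item const& i) { return i.ok ? i.bytes : 0; }),
    std::size_t{0},
    thrust::plus<std::size_t>());
}
```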
6 changes: 3 additions & 3 deletions cpp/src/io/parquet/page_string_decode.cu

@@ -1186,13 +1186,13 @@ void ComputePageStringSizes(cudf::detail::hostdevice_span<PageInfo> pages,

   // check for needed temp space for DELTA_BYTE_ARRAY
   auto const need_sizes = thrust::any_of(
-    rmm::exec_policy(stream), pages.device_begin(), pages.device_end(), [] __device__(auto& page) {
+    rmm::exec_policy(stream), pages.device_begin(), pages.device_end(), cuda::proclaim_return_type<bool>([] __device__(auto& page) {
       return page.temp_string_size != 0;
-    });
+    }));

   if (need_sizes) {
     // sum up all of the temp_string_sizes
-    auto const page_sizes = [] __device__(PageInfo const& page) { return page.temp_string_size; };
+    auto const page_sizes = cuda::proclaim_return_type<int64_t>([] __device__(PageInfo const& page) { return page.temp_string_size; });
     auto const total_size = thrust::transform_reduce(rmm::exec_policy(stream),
                                                      pages.device_begin(),
                                                      pages.device_end(),
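Here the same wrapper is applied to a predicate passed to `thrust::any_of` (proclaiming `bool`) and to the `page_sizes` transform (proclaiming `int64_t` so the temp-size reduction happens in 64 bits). A hedged sketch of the predicate case, with a made-up `Page` struct standing in for cudf's `PageInfo`:

```cpp
// Illustrative sketch; `Page` and `any_needs_temp_space` are hypothetical,
// not cudf's PageInfo or its decode helpers.
#include <cuda/functional>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/logical.h>
#include <cstdint>

struct Page {
  int64_t temp_string_size;
};

bool any_needs_temp_space(thrust::device_vector<Page> const& pages)
{
  return thrust::any_of(
    thrust::device,
    pages.begin(),
    pages.end(),
    // Proclaiming `bool` keeps the predicate's result type explicit for Thrust.
    cuda::proclaim_return_type<bool>(
      [] __device__(Page const& page) { return page.temp_string_size != 0; }));
}
```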
4 changes: 2 additions & 2 deletions cpp/src/io/parquet/reader_impl_preprocess.cu

@@ -452,9 +452,9 @@ std::string encoding_to_string(Encoding encoding)
 [[nodiscard]] std::string list_unsupported_encodings(device_span<PageInfo const> pages,
                                                      rmm::cuda_stream_view stream)
 {
-  auto const to_mask = [] __device__(auto const& page) {
+  auto const to_mask = cuda::proclaim_return_type<uint32_t>([] __device__(auto const& page) {
     return is_supported_encoding(page.encoding) ? 0U : encoding_to_mask(page.encoding);
-  };
+  });
   uint32_t const unsupported = thrust::transform_reduce(
     rmm::exec_policy(stream), pages.begin(), pages.end(), to_mask, 0U, thrust::bit_or<uint32_t>());
   return encoding_bitmask_to_str(unsupported);
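Note that the proclaimed lambda is an ordinary callable object, so it can be stored in a named variable (`to_mask` above) and handed to `thrust::transform_reduce` later, here with a `thrust::bit_or` reduction. A small sketch of that usage; `Entry`, `encoding_bit`, and the supported-encoding check below are hypothetical, not cudf APIs:

```cpp
// Sketch of storing a proclaimed device functor in a variable and reducing
// with bit_or; everything here is made up for illustration.
#include <cuda/functional>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cstdint>

struct Entry {
  int encoding;
};

__host__ __device__ inline uint32_t encoding_bit(int encoding) { return 1u << (encoding & 31); }

uint32_t unsupported_mask(thrust::device_vector<Entry> const& entries)
{
  // The wrapped lambda is a plain callable object; it can be named and reused.
  auto const to_mask = cuda::proclaim_return_type<uint32_t>(
    [] __device__(Entry const& e) { return e.encoding > 7 ? encoding_bit(e.encoding) : 0u; });

  return thrust::transform_reduce(
    thrust::device, entries.begin(), entries.end(), to_mask, 0u, thrust::bit_or<uint32_t>());
}
```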
5 changes: 3 additions & 2 deletions cpp/src/io/utilities/data_casting.cu

@@ -33,6 +33,7 @@
 #include <rmm/exec_policy.hpp>

 #include <cub/cub.cuh>
+#include <cuda/functional>
 #include <thrust/copy.h>
 #include <thrust/functional.h>
 #include <thrust/transform_reduce.h>

@@ -782,7 +783,7 @@ template <typename SymbolT>
 struct to_string_view_pair {
   SymbolT const* data;
   to_string_view_pair(SymbolT const* _data) : data(_data) {}
-  __device__ auto operator()(thrust::tuple<size_type, size_type> ip)
+  __device__ thrust::pair<char const*, std::size_t> operator()(thrust::tuple<size_type, size_type> ip)
   {
     return thrust::pair<char const*, std::size_t>{data + thrust::get<0>(ip),
                                                   static_cast<std::size_t>(thrust::get<1>(ip))};

@@ -804,7 +805,7 @@ static std::unique_ptr<column> parse_string(string_view_pair_it str_tuples,
     rmm::exec_policy(stream),
     str_tuples,
     str_tuples + col_size,
-    [] __device__(auto t) { return t.second; },
+    cuda::proclaim_return_type<std::size_t>([] __device__(auto t) { return t.second; }),
     size_type{0},
     thrust::maximum<size_type>{});

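For the named functor `to_string_view_pair`, the fix is different: spell out the `operator()` return type instead of `auto`, so `thrust::pair<char const*, std::size_t>` is visible without instantiating the call operator's body, while the max-length lambda below it gets `cuda::proclaim_return_type<std::size_t>`. A reduced sketch of the explicit-return-type idiom (a simplified stand-in, not the actual cudf functor):

```cpp
// Simplified stand-in for the pattern above; not the actual cudf functor.
#include <thrust/pair.h>
#include <thrust/tuple.h>
#include <cstddef>

struct offsets_to_string_view {
  char const* data;

  // An explicit return type (rather than `auto`) makes the functor's result
  // type visible to callers without deducing it from the body.
  __device__ thrust::pair<char const*, std::size_t> operator()(
    thrust::tuple<int, int> ip) const
  {
    return {data + thrust::get<0>(ip), static_cast<std::size_t>(thrust::get<1>(ip))};
  }
};
```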
4 changes: 2 additions & 2 deletions cpp/src/lists/contains.cu

@@ -103,9 +103,9 @@ __device__ auto element_index_pair_iter(size_type const size)
   auto const end = thrust::make_counting_iterator(size);

   if constexpr (forward) {
-    return thrust::pair{begin, end};
+    return thrust::make_pair(begin, end);
   } else {
-    return thrust::pair{thrust::make_reverse_iterator(end), thrust::make_reverse_iterator(begin)};
+    return thrust::make_pair(thrust::make_reverse_iterator(end), thrust::make_reverse_iterator(begin));
   }
 }
