Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update the update_frontier_v_push_if_out_nbr primitive & BFS performance #1988

Merged
merged 18 commits into from
Jan 10, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
26b8b64
update update_frontier_v_push_if_out_nbr to use fewer atomic instructions
seunghwak Dec 10, 2021
f86b170
add result_type to evaluate_edge_op
seunghwak Dec 10, 2021
4ca39ec
update FIXME comments
seunghwak Dec 14, 2021
a0161e8
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Dec 14, 2021
a0504ac
bfs performance tuning
seunghwak Dec 15, 2021
0274e35
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Dec 15, 2021
1c03998
pass rmm memory allocator to cuco
seunghwak Dec 16, 2021
8a063e3
Merge branch 'pr_upstream1994' into enh_bfs_perf
seunghwak Dec 16, 2021
aa012be
avoid unnecessary atomicAdd
seunghwak Dec 17, 2021
90fd980
add prev_visited_flags to reduce the number of atomic operations
seunghwak Dec 20, 2021
45859bb
additional update_frontier_v_push_if_out_nbr performance improvements
seunghwak Dec 21, 2021
915c5c2
update hypersparse case of update_frontier_v_push_if_out_nbr
seunghwak Dec 21, 2021
4f53673
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Dec 21, 2021
c35bf8e
test with unweighted graphs for BFS
seunghwak Jan 3, 2022
1fb706c
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Jan 4, 2022
cdf56d1
update copyright year
seunghwak Jan 4, 2022
83ef043
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Jan 5, 2022
0990958
Merge branch 'branch-22.02' of github.com:rapidsai/cugraph into enh_bfs_perf
seunghwak Jan 6, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 63 additions & 7 deletions cpp/include/cugraph/prims/property_op_utils.cuh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -31,6 +31,8 @@

namespace cugraph {

namespace detail {

template <typename InvokeResultEdgeOp, typename Enable = void>
struct is_valid_edge_op {
static constexpr bool value = false;
Expand All @@ -43,6 +45,55 @@ struct is_valid_edge_op<
static constexpr bool valid = true;
};

// Primary template, intentionally left undefined: yields the result type of an
// edge operator. The partial specializations below select between the weighted
// (key, vertex, weight, row value, column value) and unweighted
// (key, vertex, row value, column value) e_op call signatures via SFINAE on
// is_valid_edge_op; instantiating this primary template (i.e. an EdgeOp
// matching neither signature) is a compile-time error.
template <typename key_t,
          typename vertex_t,
          typename weight_t,
          typename row_value_t,
          typename col_value_t,
          typename EdgeOp,
          typename Enable = void>
struct edge_op_result_type;

// Partial specialization chosen when EdgeOp is invocable with the weighted
// signature (key, vertex, weight, row value, column value).
template <typename key_t,
          typename vertex_t,
          typename weight_t,
          typename row_value_t,
          typename col_value_t,
          typename EdgeOp>
struct edge_op_result_type<
  key_t,
  vertex_t,
  weight_t,
  row_value_t,
  col_value_t,
  EdgeOp,
  std::enable_if_t<
    is_valid_edge_op<
      std::invoke_result<EdgeOp, key_t, vertex_t, weight_t, row_value_t, col_value_t>>::valid>> {
  using type = std::invoke_result_t<EdgeOp, key_t, vertex_t, weight_t, row_value_t, col_value_t>;
};

// Partial specialization chosen when EdgeOp is invocable with the unweighted
// signature (key, vertex, row value, column value).
template <typename key_t,
          typename vertex_t,
          typename weight_t,
          typename row_value_t,
          typename col_value_t,
          typename EdgeOp>
struct edge_op_result_type<
  key_t,
  vertex_t,
  weight_t,
  row_value_t,
  col_value_t,
  EdgeOp,
  std::enable_if_t<
    is_valid_edge_op<std::invoke_result<EdgeOp, key_t, vertex_t, row_value_t, col_value_t>>::valid>> {
  using type = std::invoke_result_t<EdgeOp, key_t, vertex_t, row_value_t, col_value_t>;
};

} // namespace detail

template <typename GraphViewType,
typename key_t,
typename AdjMatrixRowValueInputWrapper,
Expand All @@ -53,6 +104,9 @@ struct evaluate_edge_op {
using weight_type = typename GraphViewType::weight_type;
using row_value_type = typename AdjMatrixRowValueInputWrapper::value_type;
using col_value_type = typename AdjMatrixColValueInputWrapper::value_type;
using result_type = typename detail::
edge_op_result_type<key_t, vertex_type, weight_type, row_value_type, col_value_type, EdgeOp>::
type;

template <typename K = key_t,
typename V = vertex_type,
Expand All @@ -61,7 +115,7 @@ struct evaluate_edge_op {
typename C = col_value_type,
typename E = EdgeOp>
__device__
std::enable_if_t<is_valid_edge_op<typename std::invoke_result<E, K, V, W, R, C>>::valid,
std::enable_if_t<detail::is_valid_edge_op<typename std::invoke_result<E, K, V, W, R, C>>::valid,
typename std::invoke_result<E, K, V, W, R, C>::type>
compute(K r, V c, W w, R rv, C cv, E e)
{
Expand All @@ -74,9 +128,10 @@ struct evaluate_edge_op {
typename R = row_value_type,
typename C = col_value_type,
typename E = EdgeOp>
__device__ std::enable_if_t<is_valid_edge_op<typename std::invoke_result<E, K, V, R, C>>::valid,
typename std::invoke_result<E, K, V, R, C>::type>
compute(K r, V c, W w, R rv, C cv, E e)
__device__
std::enable_if_t<detail::is_valid_edge_op<typename std::invoke_result<E, K, V, R, C>>::valid,
typename std::invoke_result<E, K, V, R, C>::type>
compute(K r, V c, W w, R rv, C cv, E e)
{
return e(r, c, rv, cv);
}
Expand Down Expand Up @@ -104,7 +159,8 @@ struct cast_edge_op_bool_to_integer {
typename C = col_value_type,
typename E = EdgeOp>
__device__
std::enable_if_t<is_valid_edge_op<typename std::invoke_result<E, K, V, W, R, C>>::valid, T>
std::enable_if_t<detail::is_valid_edge_op<typename std::invoke_result<E, K, V, W, R, C>>::valid,
T>
operator()(K r, V c, W w, R rv, C cv)
{
return e_op(r, c, w, rv, cv) ? T{1} : T{0};
Expand All @@ -116,7 +172,7 @@ struct cast_edge_op_bool_to_integer {
typename C = col_value_type,
typename E = EdgeOp>
__device__
std::enable_if_t<is_valid_edge_op<typename std::invoke_result<E, K, V, R, C>>::valid, T>
std::enable_if_t<detail::is_valid_edge_op<typename std::invoke_result<E, K, V, R, C>>::valid, T>
operator()(K r, V c, R rv, C cv)
{
return e_op(r, c, rv, cv) ? T{1} : T{0};
Expand Down
Loading