Commit

Merge branch 'branch-23.10' into branch-23.10-large-datasets
betochimas authored Sep 20, 2023
2 parents 8e0cfac + 686c372 commit d15f601
Showing 65 changed files with 7,235 additions and 898 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -33,7 +33,7 @@ repos:
additional_dependencies:
- flake8==6.0.0
- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: v16.0.1
+ rev: v16.0.6
hooks:
- id: clang-format
exclude: |
2 changes: 1 addition & 1 deletion ci/build_cpp.sh
@@ -11,6 +11,6 @@ rapids-print-env

rapids-logger "Begin cpp build"

- rapids-mamba-retry mambabuild conda/recipes/libcugraph
+ rapids-conda-retry mambabuild conda/recipes/libcugraph

rapids-upload-conda-to-s3 cpp
12 changes: 6 additions & 6 deletions ci/build_python.sh
@@ -15,12 +15,12 @@ rapids-logger "Begin py build"

# TODO: Remove `--no-test` flags once importing on a CPU
# node works correctly
- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
conda/recipes/pylibcugraph

- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
@@ -30,7 +30,7 @@ rapids-mamba-retry mambabuild \
# platform to ensure it is included in each set of artifacts, since test
# scripts only install from one set of artifacts based on the CUDA version used
# for the test run.
- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
@@ -40,7 +40,7 @@ rapids-mamba-retry mambabuild \
# built on each CUDA platform to ensure they are included in each set of
# artifacts, since test scripts only install from one set of artifacts based on
# the CUDA version used for the test run.
- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
@@ -50,7 +50,7 @@ RAPIDS_CUDA_MAJOR="${RAPIDS_CUDA_VERSION%%.*}"

if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then
# Only CUDA 11 is supported right now due to PyTorch requirement.
- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
@@ -60,7 +60,7 @@ if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then
conda/recipes/cugraph-pyg

# Only CUDA 11 is supported right now due to PyTorch requirement.
- rapids-mamba-retry mambabuild \
+ rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
1 change: 1 addition & 0 deletions conda/recipes/cugraph-dgl/meta.yaml
@@ -26,6 +26,7 @@ requirements:
- dgl >=1.1.0.cu*
- numba >=0.57
- numpy >=1.21
+ - pylibcugraphops ={{ version }}
- python
- pytorch

3 changes: 3 additions & 0 deletions cpp/CMakeLists.txt
@@ -166,6 +166,7 @@ endif()

include(cmake/thirdparty/get_nccl.cmake)
include(cmake/thirdparty/get_cuhornet.cmake)
+ include(cmake/thirdparty/get_ucp.cmake)

if(BUILD_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
@@ -228,6 +229,7 @@ set(CUGRAPH_SOURCES
src/sampling/uniform_neighbor_sampling_mg.cpp
src/sampling/uniform_neighbor_sampling_sg.cpp
src/sampling/renumber_sampled_edgelist_sg.cu
+ src/sampling/sampling_post_processing_sg.cu
src/cores/core_number_sg.cu
src/cores/core_number_mg.cu
src/cores/k_core_sg.cu
@@ -291,6 +293,7 @@ set(CUGRAPH_SOURCES
src/community/triangle_count_mg.cu
src/traversal/k_hop_nbrs_sg.cu
src/traversal/k_hop_nbrs_mg.cu
+ src/mtmg/vertex_result.cu
)

if(USE_CUGRAPH_OPS)
35 changes: 35 additions & 0 deletions cpp/cmake/thirdparty/get_ucp.cmake
@@ -0,0 +1,35 @@
#=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================

function(find_and_configure_ucp)

if(TARGET UCP::UCP)
return()
endif()

rapids_find_generate_module(UCP
HEADER_NAMES ucp.h
LIBRARY_NAMES ucp
INCLUDE_SUFFIXES ucp/api
)

# Currently UCP has no CMake build system, so we require it to be
# built and installed on the machine already
rapids_find_package(UCP REQUIRED)

endfunction()

find_and_configure_ucp()
4 changes: 2 additions & 2 deletions cpp/include/cugraph/detail/utility_wrappers.hpp
@@ -37,8 +37,8 @@ namespace detail {
* @param[in] stream_view stream view
* @param[out] d_value device array to fill
* @param[in] size number of elements in array
- * @param[in] min_value minimum value
- * @param[in] max_value maximum value
+ * @param[in] min_value minimum value (inclusive)
+ * @param[in] max_value maximum value (exclusive)
* @param[in] rng_state The RngState instance holding pseudo-random number generator state.
*
*/
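The clarified convention is the usual half-open interval [min_value, max_value). As a host-side analogy only (not the cugraph device-side implementation, which fills a device array via rng_state), std::uniform_real_distribution draws from exactly this kind of interval:

#include <iostream>
#include <random>

// Host-side analogy of the [min_value, max_value) convention documented
// above; the real cugraph routine fills a device array instead.
int main()
{
  std::mt19937 rng{42};
  // Every drawn value v satisfies 5.0 <= v < 10.0: min inclusive, max exclusive.
  std::uniform_real_distribution<float> dist{5.0f, 10.0f};
  for (int i = 0; i < 4; ++i) { std::cout << dist(rng) << '\n'; }
  return 0;
}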
4 changes: 4 additions & 0 deletions cpp/include/cugraph/graph_functions.hpp
@@ -919,6 +919,10 @@ rmm::device_uvector<vertex_t> select_random_vertices(
/**
* @brief renumber sampling output
*
+ * @deprecated This API will be deprecated and will be replaced by the
+ * renumber_and_compress_sampled_edgelist and renumber_and_sort_sampled_edgelist functions in
+ * sampling_functions.hpp.
+ *
* This function renumbers sampling function (e.g. uniform_neighbor_sample) outputs satisfying the
* following requirements.
*
39 changes: 39 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_span.hpp
@@ -0,0 +1,39 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_wrapper.hpp>
#include <raft/core/device_span.hpp>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
* @brief Wrap an object to be available for each GPU
*
* In the MTMG environment we need the ability to manage a collection of objects
* that are associated with a particular GPU, and fetch the objects from an
* arbitrary GPU thread. This object will wrap any object and allow it to be
* accessed from different threads.
*/
template <typename T>
using device_shared_device_span_t = device_shared_wrapper_t<raft::device_span<T>>;

} // namespace detail
} // namespace mtmg
} // namespace cugraph
58 changes: 58 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_vector.hpp
@@ -0,0 +1,58 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_device_span.hpp>
#include <rmm/device_uvector.hpp>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
* @brief Wrap an object to be available for each GPU
*
* In the MTMG environment we need the ability to manage a collection of objects
* that are associated with a particular GPU, and fetch the objects from an
* arbitrary GPU thread. This object will wrap any object and allow it to be
* accessed from different threads.
*/
template <typename T>
class device_shared_device_vector_t : public device_shared_wrapper_t<rmm::device_uvector<T>> {
using parent_t = detail::device_shared_wrapper_t<rmm::device_uvector<T>>;

public:
/**
* @brief Create a device_shared_device_span (read-only view)
*/
auto view()
{
std::lock_guard<std::mutex> lock(parent_t::lock_);

device_shared_device_span_t<T const> result;

std::for_each(parent_t::objects_.begin(), parent_t::objects_.end(), [&result](auto& p) {
result.set(p.first, raft::device_span<T const>{p.second.data(), p.second.size()});
});

return result;
}
};

} // namespace detail
} // namespace mtmg
} // namespace cugraph
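view() converts each owning per-GPU vector into a non-owning, read-only span while the lock is held, so callers never copy device memory. A host-side analogy of that owning-to-view conversion, with std::vector standing in for rmm::device_uvector and std::span (C++20) for raft::device_span:

#include <iostream>
#include <map>
#include <span>
#include <vector>

// Host-side analogy of device_shared_device_vector_t::view(): each owning
// container, keyed by local rank, is exposed as a non-owning read-only span.
int main()
{
  std::map<int, std::vector<int>> owned{{0, {1, 2, 3}}, {1, {4, 5}}};
  std::map<int, std::span<int const>> views;
  for (auto& [rank, vec] : owned) {
    views.emplace(rank, std::span<int const>{vec.data(), vec.size()});
  }
  std::cout << views.at(1)[0] << '\n';  // prints 4; no element was copied
  return 0;
}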
123 changes: 123 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_wrapper.hpp
@@ -0,0 +1,123 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/handle.hpp>
#include <cugraph/utilities/error.hpp>

#include <map>
#include <mutex>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
* @brief Wrap an object to be available for each GPU
*
* In the MTMG environment we need the ability to manage a collection of objects
* that are associated with a particular GPU, and fetch the objects from an
* arbitrary GPU thread. This object will wrap any object and allow it to be
* accessed from different threads.
*/
template <typename T>
class device_shared_wrapper_t {
public:
using wrapped_t = T;

device_shared_wrapper_t() = default;
device_shared_wrapper_t(device_shared_wrapper_t&& other) : objects_{std::move(other.objects_)} {}
device_shared_wrapper_t& operator=(device_shared_wrapper_t&& other)
{
objects_ = std::move(other.objects_);
return *this;
}

/**
* @brief Move a wrapped object into the wrapper for the GPU identified by the handle
*
* @param handle Handle used to identify the GPU this object is associated with
* @param obj Wrapped object
*/
void set(cugraph::mtmg::handle_t const& handle, wrapped_t&& obj)
{
std::lock_guard<std::mutex> lock(lock_);

auto pos = objects_.find(handle.get_local_rank());
CUGRAPH_EXPECTS(pos == objects_.end(), "Cannot overwrite wrapped object");

objects_.insert(std::make_pair(handle.get_local_rank(), std::move(obj)));
}

/**
* @brief Move a wrapped object into the wrapper for the given GPU
*
* @param local_rank Identifies which GPU to associate this object with
* @param obj Wrapped object
*/
void set(int local_rank, wrapped_t&& obj)
{
std::lock_guard<std::mutex> lock(lock_);

auto pos = objects_.find(local_rank);
CUGRAPH_EXPECTS(pos == objects_.end(), "Cannot overwrite wrapped object");

objects_.insert(std::make_pair(local_rank, std::move(obj)));
}

public:
/**
* @brief Get a reference to the object associated with this thread's GPU
*
* @param handle Handle used to identify the GPU this object is associated with
* @return Reference to the wrapped object
*/
wrapped_t& get(cugraph::mtmg::handle_t const& handle)
{
std::lock_guard<std::mutex> lock(lock_);

auto pos = objects_.find(handle.get_local_rank());
CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object");

return pos->second;
}

/**
* @brief Get a const reference to the object associated with this thread's GPU
*
* @param handle Handle used to identify the GPU this object is associated with
* @return Const reference to the wrapped object
*/
wrapped_t const& get(cugraph::mtmg::handle_t const& handle) const
{
std::lock_guard<std::mutex> lock(lock_);

auto pos = objects_.find(handle.get_local_rank());

CUGRAPH_EXPECTS(pos != objects_.end(), "Uninitialized wrapped object");

return pos->second;
}

protected:
mutable std::mutex lock_{};
std::map<int, wrapped_t> objects_{};
};

} // namespace detail
} // namespace mtmg
} // namespace cugraph
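For readers without the cugraph headers at hand, here is a simplified standalone sketch of the same pattern, with std::string standing in for a wrapped GPU object and exceptions standing in for CUGRAPH_EXPECTS: one object per local rank in a mutex-guarded map, set and fetched from arbitrary threads.

#include <iostream>
#include <map>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

// Standalone sketch of the device_shared_wrapper_t pattern above (no CUDA
// or cugraph dependencies): any thread may publish or fetch the object
// associated with a given local rank; the map is guarded by a mutex.
template <typename T>
class per_rank_wrapper_t {
 public:
  void set(int local_rank, T&& obj)
  {
    std::lock_guard<std::mutex> lock(lock_);
    if (objects_.count(local_rank) > 0) throw std::runtime_error("Cannot overwrite wrapped object");
    objects_.emplace(local_rank, std::move(obj));
  }

  T& get(int local_rank)
  {
    std::lock_guard<std::mutex> lock(lock_);
    auto pos = objects_.find(local_rank);
    if (pos == objects_.end()) throw std::runtime_error("Uninitialized wrapped object");
    return pos->second;
  }

 private:
  std::mutex lock_{};
  std::map<int, T> objects_{};
};

int main()
{
  per_rank_wrapper_t<std::string> names;
  std::vector<std::thread> workers;
  for (int rank = 0; rank < 4; ++rank) {
    workers.emplace_back([&names, rank] { names.set(rank, "gpu-" + std::to_string(rank)); });
  }
  for (auto& w : workers) { w.join(); }
  std::cout << names.get(2) << '\n';  // prints gpu-2, fetched from another thread
  return 0;
}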
(Diff truncated; the remaining changed files are not shown.)
