Merge branch 'branch-24.06' into run_nx_example
acostadon authored May 8, 2024
2 parents a9948f7 + 9e3f745 commit f0202ba
Showing 89 changed files with 3,711 additions and 1,521 deletions.
4 changes: 4 additions & 0 deletions ci/release/update-version.sh
@@ -103,5 +103,9 @@ sed_runner "s/branch-.*/branch-${NEXT_SHORT_TAG}/g" python/nx-cugraph/README.md
find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r -d '' filename; do
sed_runner "s@rapidsai/devcontainers:[0-9.]*@rapidsai/devcontainers:${NEXT_SHORT_TAG}@g" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/ucx:[0-9.]*@rapidsai/devcontainers/features/ucx:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/cuda:[0-9.]*@rapidsai/devcontainers/features/cuda:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
done

sed_runner "s/:[0-9][0-9]\.[0-9][0-9]/:${NEXT_SHORT_TAG}/" ./notebooks/README.md
sed_runner "s/branch-[0-9][0-9].[0-9][0-9]/branch-${NEXT_SHORT_TAG}/" ./docs/cugraph/source/nx_cugraph/nx_cugraph.md
2 changes: 1 addition & 1 deletion ci/test_python.sh
@@ -190,7 +190,7 @@ fi

if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then
if [[ "${RUNNER_ARCH}" != "ARM64" ]]; then
rapids-mamba-retry env create --force -f env.yaml -n test_cugraph_pyg
rapids-mamba-retry env create --yes -f env.yaml -n test_cugraph_pyg

# Temporarily allow unbound variables for conda activation.
set +u
5 changes: 4 additions & 1 deletion conda/recipes/cugraph-pyg/conda_build_config.yaml
@@ -12,5 +12,8 @@ cuda_compiler:
cmake_version:
- ">=3.26.4"

sysroot_version:
c_stdlib:
- sysroot

c_stdlib_version:
- "2.17"
2 changes: 1 addition & 1 deletion conda/recipes/cugraph-pyg/meta.yaml
@@ -20,7 +20,7 @@ build:

requirements:
build:
- sysroot_{{ target_platform }} {{ sysroot_version }}
- {{ stdlib("c") }}
host:
- cython >=3.0.0
- python
5 changes: 4 additions & 1 deletion conda/recipes/cugraph/conda_build_config.yaml
@@ -13,7 +13,10 @@ cuda11_compiler:
cmake_version:
- ">=3.26.4"

sysroot_version:
c_stdlib:
- sysroot

c_stdlib_version:
- "2.17"

ucx_py_version:
2 changes: 1 addition & 1 deletion conda/recipes/cugraph/meta.yaml
@@ -53,7 +53,7 @@ requirements:
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
- {{ stdlib("c") }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
5 changes: 4 additions & 1 deletion conda/recipes/libcugraph/conda_build_config.yaml
@@ -19,7 +19,10 @@ doxygen_version:
nccl_version:
- ">=2.9.9"

sysroot_version:
c_stdlib:
- sysroot

c_stdlib_version:
- "2.17"

# The CTK libraries below are missing from the conda-forge::cudatoolkit
2 changes: 1 addition & 1 deletion conda/recipes/libcugraph/meta.yaml
@@ -43,7 +43,7 @@ requirements:
- cmake {{ cmake_version }}
- ninja
- openmpi # Required for building cpp-mgtests (multi-GPU tests)
- sysroot_{{ target_platform }} {{ sysroot_version }}
- {{ stdlib("c") }}
host:
{% if cuda_major == "11" %}
- cudatoolkit
5 changes: 4 additions & 1 deletion conda/recipes/pylibcugraph/conda_build_config.yaml
@@ -13,7 +13,10 @@ cuda11_compiler:
cmake_version:
- ">=3.26.4"

sysroot_version:
c_stdlib:
- sysroot

c_stdlib_version:
- "2.17"

ucx_py_version:
2 changes: 1 addition & 1 deletion conda/recipes/pylibcugraph/meta.yaml
@@ -53,7 +53,7 @@ requirements:
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
- {{ stdlib("c") }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
4 changes: 3 additions & 1 deletion cpp/include/cugraph/algorithms.hpp
@@ -23,6 +23,8 @@
#include <cugraph/legacy/graph.hpp>
#include <cugraph/legacy/internals.hpp>

#include <rmm/resource_ref.hpp>

#ifndef NO_CUGRAPH_OPS
#include <cugraph-ops/graph/sampling.hpp>
#endif
@@ -830,7 +832,7 @@ template <typename vertex_t, typename edge_t, typename weight_t>
std::unique_ptr<legacy::GraphCOO<vertex_t, edge_t, weight_t>> minimum_spanning_tree(
raft::handle_t const& handle,
legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource());

namespace subgraph {
/**
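
The header change above is part of a broader migration in this commit: raw rmm::mr::device_memory_resource* parameters become rmm::device_async_resource_ref (declared in <rmm/resource_ref.hpp>), and the default argument can remain rmm::mr::get_current_device_resource() because the returned pointer converts implicitly to the ref type. A minimal sketch of the pattern, using a hypothetical helper rather than any actual cugraph function:

#include <cstddef>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/resource_ref.hpp>

// Hypothetical helper (not part of cugraph): allocates scratch space from `mr`.
rmm::device_buffer make_scratch(
  std::size_t bytes,
  rmm::cuda_stream_view stream,
  rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource())
{
  // rmm::device_buffer itself accepts a device_async_resource_ref,
  // so the reference is simply forwarded to the allocation.
  return rmm::device_buffer{bytes, stream, mr};
}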
5 changes: 3 additions & 2 deletions cpp/include/cugraph/dendrogram.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
* Copyright (c) 2021-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
#pragma once

#include <rmm/device_uvector.hpp>
#include <rmm/resource_ref.hpp>

#include <memory>
#include <vector>
@@ -28,7 +29,7 @@ class Dendrogram {
void add_level(vertex_t first_index,
vertex_t num_verts,
rmm::cuda_stream_view stream_view,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource())
{
level_ptr_.push_back(
std::make_unique<rmm::device_uvector<vertex_t>>(num_verts, stream_view, mr));
3 changes: 2 additions & 1 deletion cpp/include/cugraph/legacy/functions.hpp
@@ -20,6 +20,7 @@
#include <raft/core/handle.hpp>

#include <rmm/device_buffer.hpp>
#include <rmm/resource_ref.hpp>

namespace cugraph {

@@ -43,7 +44,7 @@ namespace cugraph {
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCSR<VT, ET, WT>> coo_to_csr(
legacy::GraphCOOView<VT, ET, WT> const& graph,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource());

/**
* @brief Broadcast using handle communicator
19 changes: 10 additions & 9 deletions cpp/include/cugraph/legacy/graph.hpp
@@ -17,6 +17,7 @@
#include <raft/core/handle.hpp>

#include <rmm/device_buffer.hpp>
#include <rmm/resource_ref.hpp>

#include <unistd.h>

@@ -349,9 +350,9 @@ class GraphCOO {
*/
GraphCOO(vertex_t number_of_vertices,
edge_t number_of_edges,
bool has_data = false,
cudaStream_t stream = nullptr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
bool has_data = false,
cudaStream_t stream = nullptr,
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource())
: number_of_vertices_p(number_of_vertices),
number_of_edges_p(number_of_edges),
src_indices_p(sizeof(vertex_t) * number_of_edges, stream, mr),
@@ -361,8 +362,8 @@
}

GraphCOO(GraphCOOView<vertex_t, edge_t, weight_t> const& graph,
cudaStream_t stream = nullptr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
cudaStream_t stream = nullptr,
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource())
: number_of_vertices_p(graph.number_of_vertices),
number_of_edges_p(graph.number_of_edges),
src_indices_p(graph.src_indices, graph.number_of_edges * sizeof(vertex_t), stream, mr),
@@ -457,7 +458,7 @@ class GraphCompressedSparseBase {
edge_t number_of_edges,
bool has_data,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
rmm::device_async_resource_ref mr)
: number_of_vertices_p(number_of_vertices),
number_of_edges_p(number_of_edges),
offsets_p(sizeof(edge_t) * (number_of_vertices + 1), stream, mr),
@@ -525,9 +526,9 @@ class GraphCSR : public GraphCompressedSparseBase<vertex_t, edge_t, weight_t> {
*/
GraphCSR(vertex_t number_of_vertices_,
edge_t number_of_edges_,
bool has_data_ = false,
cudaStream_t stream = nullptr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
bool has_data_ = false,
cudaStream_t stream = nullptr,
rmm::device_async_resource_ref mr = rmm::mr::get_current_device_resource())
: GraphCompressedSparseBase<vertex_t, edge_t, weight_t>(
number_of_vertices_, number_of_edges_, has_data_, stream, mr)
{
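
Caller-side sketch (assumed usage, not taken from this commit): a concrete RMM resource still binds to the new rmm::device_async_resource_ref parameters, either by reference or through the implicit conversion from a device_memory_resource*, so call sites that pass a pool or CUDA resource keep compiling. The pool size below is purely illustrative.

#include <cstddef>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

int main()
{
  rmm::mr::cuda_memory_resource cuda_mr;
  // 256 MiB initial pool layered on top of the plain CUDA resource.
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{
    &cuda_mr, std::size_t{1} << 28};

  rmm::cuda_stream_view stream = rmm::cuda_stream_default;

  // Constructors that previously took rmm::mr::device_memory_resource* now take
  // rmm::device_async_resource_ref; &pool_mr converts implicitly to the ref.
  rmm::device_uvector<int> vec(1024, stream, &pool_mr);
  return 0;
}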