Migrate upstream models to cugraph-pyg (rapidsai#3763)
This PR migrates SAGEConv and RGCNConv to cugraph-pyg, in preparation for removing these models from upstream PyTorch Geometric.
As a result, `pylibcugraphops` becomes a required dependency of cugraph-pyg.
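
For downstream code, the migration mainly changes where these layers are imported from. A minimal sketch of the intended import path (the `cugraph_pyg.nn.conv` module shown in the `__init__.py` diff below; the upstream PyG class names are an assumption, not confirmed by this PR):

```python
# Sketch only: intended import path after this migration.
# The upstream PyG class names below are an assumption, not confirmed by this PR.

# Before: the convolutions lived upstream in PyTorch Geometric, e.g.
# from torch_geometric.nn import CuGraphSAGEConv, CuGraphRGCNConv

# After: the convolutions live in cugraph-pyg, backed by pylibcugraphops
from cugraph_pyg.nn.conv import SAGEConv, RGCNConv
```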

Authors:
  - Tingyu Wang (https://github.com/tingyu66)

Approvers:
  - Alex Barghi (https://github.com/alexbarghi-nv)
  - AJ Schmidt (https://github.com/ajschmidt8)

URL: rapidsai#3763
tingyu66 authored Sep 6, 2023
1 parent b27d99f commit 6b57f56
Showing 19 changed files with 603 additions and 101 deletions.
1 change: 1 addition & 0 deletions conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -5,6 +5,7 @@ channels:
- rapidsai-nightly
- dask/label/dev
- pytorch
- pyg
- dglteam/label/cu118
- conda-forge
- nvidia
1 change: 1 addition & 0 deletions conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -5,6 +5,7 @@ channels:
- rapidsai-nightly
- dask/label/dev
- pytorch
- pyg
- dglteam/label/cu118
- conda-forge
- nvidia
1 change: 1 addition & 0 deletions conda/recipes/cugraph-pyg/meta.yaml
@@ -33,6 +33,7 @@ requirements:
- pytorch >=2.0
- cupy >=12.0.0
- cugraph ={{ version }}
- pylibcugraphops ={{ version }}
- pyg >=2.3,<2.4

tests:
24 changes: 21 additions & 3 deletions dependencies.yaml
@@ -199,12 +199,24 @@ files:
output: conda
conda_dir: python/cugraph-dgl/conda
includes:
- checks
- cugraph_dgl_dev
- test_python_common
cugraph_pyg_dev:
matrix:
cuda: ["11.8"]
output: conda
conda_dir: python/cugraph-pyg/conda
includes:
- checks
- cugraph_pyg_dev
- test_python_common
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- pytorch
- pyg
- dglteam/label/cu118
- conda-forge
- nvidia
@@ -498,6 +510,12 @@ dependencies:
- pytorch>=2.0
- pytorch-cuda==11.8
- dgl>=1.1.0.cu*
- setuptools
- pre-commit
- pytest
cugraph_pyg_dev:
common:
- output_types: [conda]
packages:
- cugraph==23.10.*
- pylibcugraphops==23.10.*
- pytorch==2.0
- pytorch-cuda==11.8
- pyg=2.3.1=*torch_2.0.0*cu118*
7 changes: 6 additions & 1 deletion python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml
@@ -5,16 +5,21 @@ channels:
- rapidsai-nightly
- dask/label/dev
- pytorch
- pyg
- dglteam/label/cu118
- conda-forge
- nvidia
dependencies:
- cugraph==23.10.*
- dgl>=1.1.0.cu*
- pandas
- pre-commit
- pylibcugraphops==23.10.*
- pytest
- pytest-benchmark
- pytest-cov
- pytest-xdist
- pytorch-cuda==11.8
- pytorch>=2.0
- setuptools
- scipy
name: cugraph_dgl_dev_cuda-118
25 changes: 25 additions & 0 deletions python/cugraph-pyg/conda/cugraph_pyg_dev_cuda-118.yaml
@@ -0,0 +1,25 @@
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- pytorch
- pyg
- dglteam/label/cu118
- conda-forge
- nvidia
dependencies:
- cugraph==23.10.*
- pandas
- pre-commit
- pyg=2.3.1=*torch_2.0.0*cu118*
- pylibcugraphops==23.10.*
- pytest
- pytest-benchmark
- pytest-cov
- pytest-xdist
- pytorch-cuda==11.8
- pytorch==2.0
- scipy
name: cugraph_pyg_dev_cuda-118
4 changes: 4 additions & 0 deletions python/cugraph-pyg/cugraph_pyg/nn/conv/__init__.py
@@ -13,10 +13,14 @@

from .gat_conv import GATConv
from .gatv2_conv import GATv2Conv
from .rgcn_conv import RGCNConv
from .sage_conv import SAGEConv
from .transformer_conv import TransformerConv

__all__ = [
"GATConv",
"GATv2Conv",
"RGCNConv",
"SAGEConv",
"TransformerConv",
]
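
With RGCNConv and SAGEConv now exported from `cugraph_pyg.nn.conv`, they can be instantiated directly. A brief sketch (constructor arguments assumed to mirror the PyG equivalents; see the class definitions added in this PR for the exact signatures):

```python
# Sketch: instantiating the migrated layers.
# Constructor arguments are assumed to mirror PyG's SAGEConv/RGCNConv.
from cugraph_pyg.nn.conv import RGCNConv, SAGEConv

sage = SAGEConv(in_channels=64, out_channels=32)
rgcn = RGCNConv(in_channels=64, out_channels=32, num_relations=4)
```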
26 changes: 13 additions & 13 deletions python/cugraph-pyg/cugraph_pyg/nn/conv/base.py
@@ -15,10 +15,10 @@
from typing import Optional, Tuple, Union

from cugraph.utilities.utils import import_optional
from pylibcugraphops.pytorch import CSC, HeteroCSC

torch = import_optional("torch")
torch_geometric = import_optional("torch_geometric")
ops_torch = import_optional("pylibcugraphops.pytorch")


class BaseConv(torch.nn.Module): # pragma: no cover
@@ -74,7 +74,7 @@ def get_cugraph(
csc: Tuple[torch.Tensor, torch.Tensor, int],
bipartite: bool = False,
max_num_neighbors: Optional[int] = None,
) -> ops_torch.CSC:
) -> CSC:
r"""Constructs a :obj:`cugraph-ops` graph object from CSC representation.
Supports both bipartite and non-bipartite graphs.
@@ -87,22 +87,22 @@ def get_cugraph(
bipartite (bool): If set to :obj:`True`, will create the bipartite
structure in cugraph-ops. (default: :obj:`False`)
max_num_neighbors (int, optional): The maximum number of neighbors
of a target node. It is only effective when operating in a
bipartite graph. When not given, will be computed on-the-fly,
leading to slightly worse performance. (default: :obj:`None`)
of a destination node. When enabled, it allows models to use
the message-flow-graph primitives in cugraph-ops.
(default: :obj:`None`)
"""
row, colptr, num_src_nodes = csc

if not row.is_cuda:
raise RuntimeError(
f"'{self.__class__.__name__}' requires GPU-"
f"based processing (got CPU tensor)"
f"'{self.__class__.__name__}' requires GPU-based processing "
f"but got CPU tensor."
)

if max_num_neighbors is None:
max_num_neighbors = -1

return ops_torch.CSC(
return CSC(
offsets=colptr,
indices=row,
num_src_nodes=num_src_nodes,
@@ -117,7 +117,7 @@ def get_typed_cugraph(
num_edge_types: Optional[int] = None,
bipartite: bool = False,
max_num_neighbors: Optional[int] = None,
) -> ops_torch.HeteroCSC:
) -> HeteroCSC:
r"""Constructs a typed :obj:`cugraph` graph object from a CSC
representation where each edge corresponds to a given edge type.
Supports both bipartite and non-bipartite graphs.
@@ -135,9 +135,9 @@ def get_typed_cugraph(
bipartite (bool): If set to :obj:`True`, will create the bipartite
structure in cugraph-ops. (default: :obj:`False`)
max_num_neighbors (int, optional): The maximum number of neighbors
of a target node. It is only effective when operating in a
bipartite graph. When not given, will be computed on-the-fly,
leading to slightly worse performance. (default: :obj:`None`)
of a destination node. When enabled, it allows models to use
the message-flow-graph primitives in cugraph-ops.
(default: :obj:`None`)
"""
if num_edge_types is None:
num_edge_types = int(edge_type.max()) + 1
@@ -148,7 +148,7 @@ def get_typed_cugraph(
row, colptr, num_src_nodes = csc
edge_type = edge_type.int()

return ops_torch.HeteroCSC(
return HeteroCSC(
offsets=colptr,
indices=row,
edge_types=edge_type,
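
For context, the `csc` argument unpacked in `get_cugraph` and `get_typed_cugraph` is a `(row, colptr, num_src_nodes)` tuple describing the graph in compressed-sparse-column form. A minimal sketch of a hypothetical helper (not part of this PR) that builds such a tuple from a COO `edge_index`:

```python
# Hypothetical helper (not part of this PR): build the (row, colptr, num_src_nodes)
# tuple consumed by BaseConv.get_cugraph() from a COO edge_index.
import torch

def coo_to_csc(edge_index: torch.Tensor, num_src_nodes: int, num_dst_nodes: int):
    src, dst = edge_index              # shape [2, num_edges]; must live on the GPU for cugraph-ops
    perm = torch.argsort(dst)          # group edges by destination node
    row = src[perm]                    # source of each edge, ordered by destination
    colptr = torch.zeros(num_dst_nodes + 1, dtype=torch.int64, device=dst.device)
    colptr[1:] = torch.cumsum(torch.bincount(dst, minlength=num_dst_nodes), dim=0)
    return row, colptr, num_src_nodes  # colptr[j]:colptr[j+1] spans the neighbors of node j
```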
11 changes: 6 additions & 5 deletions python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py
@@ -10,16 +10,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple, Union

from cugraph.utilities.utils import import_optional
from pylibcugraphops.pytorch.operators import mha_gat_n2n

from .base import BaseConv

torch = import_optional("torch")
nn = import_optional("torch.nn")
torch_geometric = import_optional("torch_geometric")
ops_torch = import_optional("pylibcugraphops.pytorch")


class GATConv(BaseConv):
@@ -174,9 +175,9 @@ def forward(
representation to the desired format.
edge_attr: (torch.Tensor, optional) The edge features.
max_num_neighbors (int, optional): The maximum number of neighbors
of a target node. It is only effective when operating in a
bipartite graph. When not given, will be computed on-the-fly,
leading to slightly worse performance. (default: :obj:`None`)
of a destination node. When enabled, it allows models to use
the message-flow-graph primitives in cugraph-ops.
(default: :obj:`None`)
"""
bipartite = not isinstance(x, torch.Tensor)
graph = self.get_cugraph(
@@ -210,7 +211,7 @@ def forward(
)
x = self.lin(x)

out = ops_torch.operators.mha_gat_n2n(
out = mha_gat_n2n(
(x_src, x_dst) if bipartite else x,
self.att,
graph,
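
A usage sketch for the layer above, assuming a PyG-style constructor (only the `forward()` arguments are visible in this diff) and a CSC tuple built with the helper sketched after `base.py`:

```python
# Usage sketch: constructor signature assumed to mirror PyG's GATConv.
import torch
from cugraph_pyg.nn.conv import GATConv

num_nodes, in_channels = 100, 16
x = torch.randn(num_nodes, in_channels, device="cuda")              # node features on GPU
edge_index = torch.randint(0, num_nodes, (2, 500), device="cuda")   # random COO graph
csc = coo_to_csc(edge_index, num_nodes, num_nodes)                  # helper sketched above

conv = GATConv(in_channels, out_channels=32, heads=4).cuda()
out = conv(x, csc)  # by analogy with PyG: [num_nodes, 32 * 4] when head outputs are concatenated
```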
5 changes: 3 additions & 2 deletions python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py
@@ -10,16 +10,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple, Union

from cugraph.utilities.utils import import_optional
from pylibcugraphops.pytorch.operators import mha_gat_v2_n2n

from .base import BaseConv

torch = import_optional("torch")
nn = import_optional("torch.nn")
torch_geometric = import_optional("torch_geometric")
ops_torch = import_optional("pylibcugraphops.pytorch")


class GATv2Conv(BaseConv):
@@ -207,7 +208,7 @@ def forward(
else:
x = self.lin_src(x)

out = ops_torch.operators.mha_gat_v2_n2n(
out = mha_gat_v2_n2n(
(x_src, x_dst) if bipartite else x,
self.att,
graph,