Skip to content

Commit

Permalink
Fix cugraph tests (#6907)
Browse files Browse the repository at this point in the history
need more tolerance for cugraph-ops
also for cugraph conversion, we NEED to put the cugraph tensor on cpu or
tests fail. this is recommended and required by the cugraph engineers
```
>       assert torch.equal(edge_index, cu_edge_index)
E       RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument other in method wrapper_CUDA__equal)
```
  • Loading branch information
puririshi98 authored Mar 13, 2023
1 parent bf06e08 commit 688c754
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 13 deletions.
10 changes: 5 additions & 5 deletions test/nn/conv/cugraph/test_cugraph_gat_conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,17 +39,17 @@ def test_gat_conv_equality(bias, bipartite, concat, heads, max_num_neighbors):

csc = CuGraphGATConv.to_csc(edge_index, size)
out2 = conv2(x, csc, max_num_neighbors=max_num_neighbors)
assert torch.allclose(out1, out2, atol=1e-6)
assert torch.allclose(out1, out2, atol=1e-3)

grad_output = torch.rand_like(out1)
out1.backward(grad_output)
out2.backward(grad_output)

assert torch.allclose(conv1.lin_src.weight.grad, conv2.lin.weight.grad,
atol=1e-6)
atol=1e-3)
assert torch.allclose(conv1.att_src.grad.flatten(),
conv2.att.grad[:heads * out_channels], atol=1e-6)
conv2.att.grad[:heads * out_channels], atol=1e-3)
assert torch.allclose(conv1.att_dst.grad.flatten(),
conv2.att.grad[heads * out_channels:], atol=1e-6)
conv2.att.grad[heads * out_channels:], atol=1e-3)
if bias:
assert torch.allclose(conv1.bias.grad, conv2.bias.grad, atol=1e-6)
assert torch.allclose(conv1.bias.grad, conv2.bias.grad, atol=1e-3)
8 changes: 4 additions & 4 deletions test/nn/conv/cugraph/test_cugraph_rgcn_conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,19 +43,19 @@ def test_rgcn_conv_equality(aggr, bias, bipartite, max_num_neighbors,

csc, edge_type = CuGraphRGCNConv.to_csc(edge_index, size, edge_type)
out2 = conv2(x, csc, edge_type, max_num_neighbors=max_num_neighbors)
assert torch.allclose(out1, out2, atol=1e-4)
assert torch.allclose(out1, out2, atol=1e-3)

grad_out = torch.rand_like(out1)
out1.backward(grad_out)
out2.backward(grad_out)

end = -1 if root_weight else None
assert torch.allclose(conv1.weight.grad, conv2.weight.grad[:end],
atol=1e-4)
atol=1e-3)

if root_weight:
assert torch.allclose(conv1.root.grad, conv2.weight.grad[-1],
atol=1e-4)
atol=1e-3)

if num_bases is not None:
assert torch.allclose(conv1.comp.grad, conv2.comp.grad, atol=1e-4)
assert torch.allclose(conv1.comp.grad, conv2.comp.grad, atol=1e-3)
8 changes: 4 additions & 4 deletions test/utils/test_convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,9 +436,9 @@ def test_to_cugraph(edge_weight, directed, relabel_nodes):
cu_edge_index, cu_edge_weight = sort_edge_index(cu_edge_index,
cu_edge_weight)

assert torch.equal(edge_index, cu_edge_index)
assert torch.equal(edge_index, cu_edge_index.cpu())
if edge_weight is not None:
assert torch.allclose(edge_weight, cu_edge_weight)
assert torch.allclose(edge_weight, cu_edge_weight.cpu())


@withPackage('cudf')
Expand Down Expand Up @@ -476,8 +476,8 @@ def test_from_cugraph(edge_weight, directed, relabel_nodes):
cu_edge_index, cu_edge_weight = sort_edge_index(cu_edge_index,
cu_edge_weight)

assert torch.equal(edge_index, cu_edge_index)
assert torch.equal(edge_index, cu_edge_index.cpu())
if edge_weight is not None:
assert torch.allclose(edge_weight, cu_edge_weight)
assert torch.allclose(edge_weight, cu_edge_weight.cpu())
else:
assert cu_edge_weight is None

0 comments on commit 688c754

Please sign in to comment.