[Refine] Refine __cuda_array_interface__ (#68227)
HydrogenSulfate authored Sep 16, 2024
1 parent 8c05259 commit f97db7a
Showing 3 changed files with 20 additions and 18 deletions.
2 changes: 1 addition & 1 deletion python/paddle/base/dygraph/tensor_patch_methods.py
@@ -1260,7 +1260,7 @@ def __cuda_array_interface__(self):

# raise AttributeError for unsupported tensors, so that
# hasattr(cpu_tensor, "__cuda_array_interface__") is False.
if "gpu" not in str(self.place):
if not self.place.is_gpu_place():
raise AttributeError(
"Can't get __cuda_array_interface__ on non-CUDA tensor. "
"If CUDA data is required use tensor.cuda() to copy tensor to device memory."
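The check now asks the place object directly instead of matching a substring of its string form. A minimal usage sketch (not part of this commit, assuming a CUDA-enabled Paddle build) of the behaviour the property guarantees:

import paddle

# Sketch only, assuming a CUDA build of Paddle is installed.
gpu_tensor = paddle.randn([3, 3]).cuda()   # tensor in device memory
cpu_tensor = gpu_tensor.cpu()              # copy back to host memory

# The property raises AttributeError for non-CUDA tensors, so hasattr()
# reports False on the CPU copy and True on the GPU tensor.
print(hasattr(cpu_tensor, "__cuda_array_interface__"))  # False
print(hasattr(gpu_tensor, "__cuda_array_interface__"))  # True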
2 changes: 1 addition & 1 deletion python/paddle/tensor/stat.py
@@ -266,7 +266,7 @@ def numel(x: Tensor, name: str | None = None) -> Tensor:
Returns the number of elements for a tensor, which is a 0-D int64 Tensor with shape [].
Args:
- x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64, complex64, complex128.
+ x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, uint8, int8, int32, int64, complex64, complex128.
name (str|None, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
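A short sketch (not part of the diff) of numel with one of the newly documented integer dtypes; per the docstring above, the result is a 0-D int64 Tensor with shape []:

import paddle

# Sketch only: uint8 is one of the dtypes the updated docstring lists.
x = paddle.ones([2, 3, 4]).astype("uint8")
n = paddle.numel(x)        # 0-D int64 Tensor with shape []
print(n.item())            # 24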
34 changes: 18 additions & 16 deletions test/legacy_test/test_eager_tensor.py
@@ -1217,18 +1217,18 @@ def test___cuda_array_interface__(self):
# strides should be None if contiguous
tensor = paddle.randn([3, 3]).to(device=gpu_place)
interface = tensor.__cuda_array_interface__
assert interface["strides"] is None
self.assertIsNone(interface["strides"])

# strides should be tuple of int if not contiguous
tensor = paddle.randn([10, 10]).to(device=gpu_place)
tensor = tensor[::2]
interface = tensor.__cuda_array_interface__
assert interface["strides"] == (80, 4)
self.assertEqual(interface["strides"], (80, 4))

# data_ptr should be 0 if tensor is 0-size
tensor = paddle.randn([0, 10]).to(device=gpu_place)
interface = tensor.__cuda_array_interface__
assert interface["data"][0] == 0
self.assertEqual(interface["data"][0], 0)

# raise AttributeError for tensor that requires grad.
tensor = paddle.randn([3, 3]).to(device=gpu_place)
@@ -1261,22 +1261,24 @@ def test___cuda_array_interface__(self):
.astype(dtype)
)
interface = tensor.__cuda_array_interface__
assert "typestr" in interface and isinstance(
interface["typestr"], str
)
assert "shape" in interface and isinstance(
interface["shape"], tuple
)
assert "strides" in interface and (
self.assertIn("typestr", interface)
self.assertIsInstance(interface["typestr"], str)

self.assertIn("shape", interface)
self.assertIsInstance(interface["shape"], tuple)

self.assertIn("strides", interface)
self.assertTrue(
isinstance(interface["strides"], tuple)
or interface["strides"] is None
)
- assert (
- "data" in interface
- and isinstance(interface["data"], tuple)
- and len(interface["data"]) == 2
- )
- assert "version" in interface and interface["version"] == 2

self.assertIn("data", interface)
self.assertIsInstance(interface["data"], tuple)
self.assertEqual(len(interface["data"]), 2)

self.assertIn("version", interface)
self.assertEqual(interface["version"], 2)


class TestEagerTensorSetitem(unittest.TestCase):
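For reference, a hedged sketch (not part of the commit, assuming a CUDA-enabled build) of the interface fields the tests above assert on; strides is None for a contiguous tensor and a tuple of byte strides otherwise:

import paddle

# Sketch only, assuming CUDA is available.
t = paddle.randn([10, 10]).cuda()
iface = t.__cuda_array_interface__
print(iface["shape"], iface["typestr"])        # (10, 10) '<f4' for float32
print(iface["strides"])                        # None: the tensor is contiguous
print(iface["version"])                        # 2

s = t[::2]                                     # non-contiguous view
print(s.__cuda_array_interface__["strides"])   # (80, 4): strides are in bytes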
