From 04d4112a2794d644e6b58e35f83612459c219aad Mon Sep 17 00:00:00 2001
From: zhaoyinglia
Date: Thu, 29 Dec 2022 22:11:08 +0800
Subject: [PATCH] fix_reshape_0_dim_tensor

---
 .../tests/unittests/test_zero_dim_tensor.py   |  4 ++--
 .../unittests/test_zero_dim_tensor_mlu.py     | 19 +++-------------
 .../unittests/test_zero_dim_tensor_npu.py     | 19 +++-------------
 3 files changed, 12 insertions(+), 30 deletions(-)

diff --git a/backends/custom_cpu/tests/unittests/test_zero_dim_tensor.py b/backends/custom_cpu/tests/unittests/test_zero_dim_tensor.py
index 623916d58..158278487 100644
--- a/backends/custom_cpu/tests/unittests/test_zero_dim_tensor.py
+++ b/backends/custom_cpu/tests/unittests/test_zero_dim_tensor.py
@@ -210,14 +210,14 @@ def test_reshape_tensor(self):
         self.assertEqual(out.shape, [])
         self.assertEqual(out.grad.shape, [])
 
-        new_shape = paddle.full([], 1, "int32")
+        new_shape = paddle.full([1], 1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
         self.assertEqual(out.shape, [1])
         self.assertEqual(out.grad.shape, [1])
 
-        new_shape = paddle.full([], -1, "int32")
+        new_shape = paddle.full([1], -1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
diff --git a/backends/mlu/tests/unittests/test_zero_dim_tensor_mlu.py b/backends/mlu/tests/unittests/test_zero_dim_tensor_mlu.py
index a0acf8840..28ce2bb8d 100644
--- a/backends/mlu/tests/unittests/test_zero_dim_tensor_mlu.py
+++ b/backends/mlu/tests/unittests/test_zero_dim_tensor_mlu.py
@@ -575,14 +575,14 @@ def test_reshape_tensor(self):
         self.assertEqual(out.shape, [])
         self.assertEqual(out.grad.shape, [])
 
-        new_shape = paddle.full([], 1, "int32")
+        new_shape = paddle.full([1], 1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
         self.assertEqual(out.shape, [1])
         self.assertEqual(out.grad.shape, [1])
 
-        new_shape = paddle.full([], -1, "int32")
+        new_shape = paddle.full([1], -1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
@@ -615,27 +615,18 @@ def test_reshape__tensor(self):
         out = paddle.reshape_(x, [])
         self.assertEqual(out.shape, [])
 
-        new_shape = paddle.pull([], 1, "int32")
+        new_shape = paddle.full([1], 1, "int32")
         out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1])
 
-        new_shape = paddle.pull([], -1, "int32")
+        new_shape = paddle.full([1], -1, "int32")
         out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1])
 
-        new_shape = [paddle.pull([], -1, "int32"), paddle.pull([], 1, "int32")]
+        new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
         out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1, 1])
 
-    def test_reverse(self):
-        x = paddle.rand([])
-        x.stop_gradient = False
-        out = paddle.reverse(x, axis=[])
-        out.backward()
-        self.assertEqual(x.shape, [])
-        self.assertEqual(out.shape, [])
-        self.assertEqual(out.grad.shape, [])
-
     def test_scale(self):
         x = paddle.rand([])
         x.stop_gradient = False
diff --git a/backends/npu/tests/unittests/test_zero_dim_tensor_npu.py b/backends/npu/tests/unittests/test_zero_dim_tensor_npu.py
index 2f0c31ed2..5391a158e 100644
--- a/backends/npu/tests/unittests/test_zero_dim_tensor_npu.py
+++ b/backends/npu/tests/unittests/test_zero_dim_tensor_npu.py
@@ -575,14 +575,14 @@ def test_reshape_tensor(self):
         self.assertEqual(out.shape, [])
         self.assertEqual(out.grad.shape, [])
 
-        new_shape = paddle.full([], 1, "int32")
+        new_shape = paddle.full([1], 1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
         self.assertEqual(out.shape, [1])
         self.assertEqual(out.grad.shape, [1])
 
-        new_shape = paddle.full([], -1, "int32")
+        new_shape = paddle.full([1], -1, "int32")
         out = paddle.reshape(x, new_shape)
         out.backward()
         self.assertEqual(x.grad.shape, [1, 1])
@@ -615,27 +615,18 @@ def test_reshape__tensor(self):
         out = paddle.reshape_(x, [])
         self.assertEqual(out.shape, [])
 
-        new_shape = paddle.pull([], 1, "int32")
+        new_shape = paddle.full([1], 1, "int32")
         out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1])
 
-        new_shape = paddle.pull([], -1, "int32")
+        new_shape = paddle.full([1], -1, "int32")
        out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1])
 
-        new_shape = [paddle.pull([], -1, "int32"), paddle.pull([], 1, "int32")]
+        new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
         out = paddle.reshape_(x, new_shape)
         self.assertEqual(out.shape, [1, 1])
 
-    def test_reverse(self):
-        x = paddle.rand([])
-        x.stop_gradient = False
-        out = paddle.reverse(x, axis=[])
-        out.backward()
-        self.assertEqual(x.shape, [])
-        self.assertEqual(out.shape, [])
-        self.assertEqual(out.grad.shape, [])
-
     def test_scale(self):
         x = paddle.rand([])
         x.stop_gradient = False
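
Note: the tests switch paddle.full([], ...) to paddle.full([1], ...) because the first call builds a 0-D (scalar) tensor while the second builds a 1-D tensor with one element, and paddle.reshape documents the single-Tensor form of the shape argument as a 1-D Tensor; a list of 0-D tensors (one entry per output axis) remains valid, which is why the last case in test_reshape__tensor keeps paddle.full([], ...). The snippet below is a minimal sketch of that distinction, not part of the patch; it assumes a recent PaddlePaddle build with 0-D tensor support, and the rand/reshape values are illustrative only.

    # Minimal sketch (assumes a recent PaddlePaddle build with 0-D tensor support).
    import paddle

    x = paddle.rand([1, 1])

    scalar_shape = paddle.full([], 1, "int32")   # 0-D tensor, shape []
    vector_shape = paddle.full([1], 1, "int32")  # 1-D tensor, shape [1]
    print(scalar_shape.shape, vector_shape.shape)  # [] [1]

    # A shape passed as a single Tensor should be 1-D, so the updated tests
    # build it with paddle.full([1], ...).
    out = paddle.reshape(x, vector_shape)
    print(out.shape)  # [1]

    # A list of 0-D tensors, one per output axis, is still a valid shape,
    # which is why the last case keeps paddle.full([], ...).
    axes = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
    out = paddle.reshape(x, axes)
    print(out.shape)  # [1, 1]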