diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
index 174172b026f21..dcfee03f40cfa 100644
--- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -14,16 +14,20 @@
 import paddle
 import paddle.fluid as fluid
+from decorator_helper import prog_scope
 import paddle.nn.functional as F
 import numpy as np
 import unittest

+fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})

 unary_api_list = [
     paddle.nn.functional.elu,
     paddle.nn.functional.gelu,
     paddle.nn.functional.hardsigmoid,
     paddle.nn.functional.hardswish,
+    paddle.nn.functional.hardshrink,
+    paddle.nn.functional.hardtanh,
     paddle.nn.functional.leaky_relu,
     paddle.nn.functional.log_sigmoid,
     paddle.nn.functional.relu,
@@ -37,9 +41,11 @@
     paddle.nn.functional.thresholded_relu,
     paddle.stanh,
     paddle.nn.functional.celu,
+    paddle.nn.functional.selu,
     paddle.nn.functional.mish,
     paddle.nn.functional.silu,
     paddle.nn.functional.tanh,
+    paddle.nn.functional.dropout,
     paddle.cosh,
     paddle.sinh,
     paddle.abs,
@@ -65,6 +71,24 @@
     paddle.log10,
     paddle.log2,
     paddle.tan,
+    paddle.erf,
+    paddle.erfinv,
+    paddle.rsqrt,
+    paddle.sign,
+    paddle.deg2rad,
+    paddle.rad2deg,
+    paddle.neg,
+    paddle.logit,
+    paddle.trunc,
+    paddle.digamma,
+    paddle.lgamma,
+    paddle.poisson,
+    paddle.bernoulli,
+]
+
+inplace_api_list = [
+    paddle.nn.functional.relu_,
+    paddle.nn.functional.tanh_,
 ]
@@ -72,7 +96,6 @@
 class TestUnaryAPI(unittest.TestCase):
     def test_dygraph_unary(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         for api in unary_api_list:
             x = paddle.rand([])
             x.stop_gradient = False
@@ -81,8 +104,15 @@ def test_dygraph_unary(self):
             self.assertEqual(x.shape, [])
             self.assertEqual(out.shape, [])
-            self.assertEqual(x.grad.shape, [])
-            self.assertEqual(out.grad.shape, [])
+            if x.grad is not None:
+                self.assertEqual(x.grad.shape, [])
+                self.assertEqual(out.grad.shape, [])
+
+        for api in inplace_api_list:
+            x = paddle.rand([])
+            out = api(x)
+            self.assertEqual(x.shape, [])
+            self.assertEqual(out.shape, [])

         paddle.enable_static()
@@ -95,28 +125,32 @@ def test_static_unary(self):
                 x = paddle.rand([])
                 x.stop_gradient = False
                 out = api(x)
-                fluid.backward.append_backward(out)
+                paddle.static.append_backward(out)

-                # ScaleLossGradOp / append_backward always set grad shape to [1]
-                prog = paddle.static.default_main_program()
-                block = prog.global_block()
-
-                x_grad = block.var(fluid.framework.grad_var_name(x.name))
-                out_grad = block.var(fluid.framework.grad_var_name(out.name))
-
-                # Test compile shape, grad is always [1]
+                # Test compile shape
                 self.assertEqual(x.shape, ())
                 self.assertEqual(out.shape, ())

-                exe = fluid.Executor()
-                result = exe.run(
-                    main_prog, fetch_list=[x, out, x_grad, out_grad]
-                )
+                fetch_list = [x, out]
+                # TODO(zhouwei): ScaleLossGradOp / append_backward set grad shape to [1]
+                # will change to [] after kernel is fixed
+                prog = paddle.static.default_main_program()
+                block = prog.global_block()
+                if block.has_var(fluid.framework.grad_var_name(x.name)):
+                    out_grad = block.var(
+                        fluid.framework.grad_var_name(out.name)
+                    )
+                    fetch_list.append(out_grad)
+                    self.assertEqual(out_grad.shape, ())

                 # Test runtime shape
+                exe = fluid.Executor()
+                result = exe.run(main_prog, fetch_list=fetch_list)
                 self.assertEqual(result[0].shape, ())
                 self.assertEqual(result[1].shape, ())
-                self.assertEqual(result[3].shape, (1,))
+                if len(result) == 3:
+                    # TODO(zhouwei): will change to [] after kernel is fixed
+                    self.assertEqual(result[2].shape, (1,))

                 # 0D will be stacked when 1+ place, due to it cannot be concated
                 # for 1 place: [ x-place1 ]
@@ -135,28 +169,30 @@ def test_static_unary(self):
                 ).with_data_parallel(out.name, places=places)
                 result = exe.run(
                     compiled_program,
-                    fetch_list=[x, out, x_grad, out_grad],
+                    fetch_list=fetch_list,
                     return_merged=True,
                 )

                 # Test runtime parallel shape
                 self.assertEqual(result[0].shape, expect_shape)
                 self.assertEqual(result[1].shape, expect_shape)
-                self.assertEqual(result[3].shape, (device_num,))
+                if len(result) == 3:
+                    self.assertEqual(result[2].shape, (device_num,))

                 compiled_program = fluid.CompiledProgram(
                     main_prog
                 ).with_data_parallel(out.name, places=places)
                 result = exe.run(
                     compiled_program,
-                    fetch_list=[x, out, x_grad, out_grad],
+                    fetch_list=fetch_list,
                     return_merged=False,
                 )

                 # [[x-place1, x-place2, ...], [], [], ...]
                 self.assertEqual(np.array(result[0]).shape, (device_num,))
                 self.assertEqual(np.array(result[1]).shape, (device_num,))
-                self.assertEqual(np.array(result[3]).shape, (device_num, 1))
+                if len(result) == 3:
+                    self.assertEqual(np.array(result[2]).shape, (device_num, 1))

         paddle.disable_static()
@@ -181,7 +217,6 @@ def test_static_unary(self):
 class TestReduceAPI(unittest.TestCase):
     def test_dygraph(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         for api in reduce_api_list:
             if api in [paddle.all, paddle.any]:
                 x = paddle.randint(0, 2, []).astype('bool')
@@ -234,9 +269,6 @@ def test_static(self):
     {'func': paddle.multiply, 'cls_method': '__mul__'},
     {'func': paddle.divide, 'cls_method': '__div__'},
     {'func': paddle.pow, 'cls_method': '__pow__'},
-]
-
-binary_api_list_without_grad = [
     {'func': paddle.equal, 'cls_method': '__eq__'},
     {'func': paddle.not_equal, 'cls_method': '__ne__'},
     {'func': paddle.greater_equal, 'cls_method': '__ge__'},
@@ -251,7 +283,7 @@ def test_static(self):
     paddle.logical_xor,
 ]

-binary_int_api_list_without_grad = [
+binary_int_api_list = [
     paddle.bitwise_and,
     paddle.bitwise_or,
     paddle.bitwise_xor,
@@ -262,8 +294,7 @@ def test_static(self):
 class TestBinaryAPI(unittest.TestCase):
     def test_dygraph_binary(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        for api in binary_api_list + binary_api_list_without_grad:
+        for api in binary_api_list:
             # 1) x/y is 0D
             x = paddle.rand([])
             y = paddle.rand([])
@@ -275,10 +306,10 @@ def test_dygraph_binary(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [])
                 self.assertEqual(y.grad.shape, [])
                 self.assertEqual(out.grad.shape, [])
@@ -294,10 +325,10 @@ def test_dygraph_binary(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [2, 3, 4])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [2, 3, 4])
                 self.assertEqual(y.grad.shape, [])
                 self.assertEqual(out.grad.shape, [2, 3, 4])
@@ -313,10 +344,10 @@ def test_dygraph_binary(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [2, 3, 4])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [])
                 self.assertEqual(y.grad.shape, [2, 3, 4])
                 self.assertEqual(out.grad.shape, [2, 3, 4])
@@ -329,7 +360,7 @@ def test_dygraph_binary(self):
                 out = getattr(paddle.Tensor, api['cls_method'])(x, y)
                 self.assertEqual(out.shape, [])

-        for api in binary_int_api_list_without_grad:
+        for api in binary_int_api_list:
             # 1) x/y is 0D
             x = paddle.randint(-10, 10, [])
             y = paddle.randint(-10, 10, [])
@@ -352,7 +383,7 @@ def test_dygraph_binary(self):
     def test_static_binary(self):
         paddle.enable_static()
-        for api in binary_api_list + binary_api_list_without_grad:
+        for api in binary_api_list:
             main_prog = fluid.Program()
             with fluid.program_guard(main_prog, fluid.Program()):
                 # 1) x/y is 0D
@@ -368,16 +399,15 @@ def test_static_binary(self):
                     self.assertEqual(out.shape, out_cls.shape)
                 else:
                     out = api(x, y)
-                fluid.backward.append_backward(out)
+                paddle.static.append_backward(out)

-                # Test compile shape
                 self.assertEqual(out.shape, ())
+
                 exe = fluid.Executor()
-                out_np = exe.run(main_prog, fetch_list=[out])[0]
-                # Test runtime shape
-                self.assertEqual(out_np.shape, ())
+                result = exe.run(main_prog, fetch_list=[out])
+                self.assertEqual(result[0].shape, ())

-                # TODO(zhouwei): will open when create_scalar is []
+                # TODO: will open when create_scalar is []
                 # 2) x is 0D , y is scalar
                 '''
                 x = paddle.rand([])
@@ -391,7 +421,7 @@ def test_static_binary(self):
                 self.assertEqual(out.shape, ())
                 '''

-        for api in binary_int_api_list_without_grad:
+        for api in binary_int_api_list:
             main_prog = fluid.Program()
             with fluid.program_guard(main_prog, fluid.Program()):
                 # 1) x/y is 0D
@@ -415,10 +445,11 @@ def test_static_binary(self):
         paddle.disable_static()


-# Use to test zero-dim of Sundry API, which is simple and do
-# not have backward, or is not need to test backward in OpTest.
+# Use to test zero-dim of Sundry API, which is unique and can not be classified
+# with others. It can be implemented here flexibly.
 class TestSundryAPI(unittest.TestCase):
     def setUp(self):
+        paddle.disable_static()
         self.x = paddle.rand([])

     def test_linear(self):
@@ -501,6 +532,130 @@ def test_shape(self):
         self.assertEqual(out.shape, [0])
         np.testing.assert_array_equal(out.numpy(), np.array([]))

+    def test_pow_factor(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.pow(x, 2.0)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_cast(self):
+        x = paddle.full([], 1.0, 'float32')
+        x.stop_gradient = False
+        out = paddle.cast(x, 'int32')
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_clip(self):
+        x = paddle.uniform([], None, -10, 10)
+        x.stop_gradient = False
+        out = paddle.clip(x, -5, 5)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_increment(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.increment(x, 1.0)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_bitwise_not(self):
+        x = paddle.randint(-1, 1, [])
+        out1 = ~x
+        out2 = paddle.bitwise_not(x)
+
+        self.assertEqual(out1.shape, [])
+        self.assertEqual(out2.shape, [])
+
+    def test_logical_not(self):
+        x = paddle.randint(0, 1, [])
+        out = paddle.logical_not(x)
+
+        self.assertEqual(out.shape, [])
+
+
+class TestSundryAPIStatic(unittest.TestCase):
+    def setUp(self):
+        paddle.enable_static()
+        self.exe = paddle.static.Executor()
+
+    @prog_scope()
+    def test_pow_factor(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.pow(x, 2.0)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+
+    @prog_scope()
+    def test_cast(self):
+        x = paddle.full([], 1.0, 'float32')
+        x.stop_gradient = False
+        out = paddle.cast(x, 'int32')
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+
+    @prog_scope()
+    def test_clip(self):
+        x = paddle.uniform([], None, -10, 10)
+        x.stop_gradient = False
+        out = paddle.clip(x, -5, 5)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+
+    @prog_scope()
+    def test_increment(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.increment(x, 1.0)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+
+    @prog_scope()
+    def test_bitwise_not(self):
+        x = paddle.randint(-1, 1, [])
+        out = paddle.bitwise_not(x)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+
+    @prog_scope()
+    def test_logical_not(self):
+        x = paddle.randint(0, 1, [])
+        out = paddle.logical_not(x)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out])
+        self.assertEqual(res[0].shape, ())
+

 # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest.
 class TestNoBackwardAPI(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
index 5868fe9cb531b..a0925207c957e 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
@@ -20,12 +20,15 @@
 paddle.set_device('xpu')

+fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})

 unary_api_list = [
     paddle.nn.functional.elu,
     paddle.nn.functional.gelu,
     paddle.nn.functional.hardsigmoid,
     paddle.nn.functional.hardswish,
+    paddle.nn.functional.hardshrink,
+    paddle.nn.functional.hardtanh,
     paddle.nn.functional.leaky_relu,
     paddle.nn.functional.log_sigmoid,
     paddle.nn.functional.relu,
@@ -39,9 +42,11 @@
     paddle.nn.functional.thresholded_relu,
     paddle.stanh,
     paddle.nn.functional.celu,
+    paddle.nn.functional.selu,
     paddle.nn.functional.mish,
     paddle.nn.functional.silu,
     paddle.nn.functional.tanh,
+    paddle.nn.functional.dropout,
     paddle.cosh,
     paddle.sinh,
     paddle.abs,
@@ -67,14 +72,31 @@
     paddle.log10,
     paddle.log2,
     paddle.tan,
+    paddle.erf,
+    paddle.erfinv,
+    paddle.rsqrt,
+    paddle.sign,
+    paddle.deg2rad,
+    paddle.rad2deg,
+    paddle.neg,
+    paddle.logit,
+    paddle.trunc,
+    paddle.digamma,
+    paddle.lgamma,
+    paddle.poisson,
+    paddle.bernoulli,
+]
+
+inplace_api_list = [
+    paddle.nn.functional.relu_,
+    paddle.nn.functional.tanh_,
 ]


 # Use to test zero-dim in unary API.
 class TestUnaryAPI(unittest.TestCase):
-    def test(self):
+    def test_dygraph_unary(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         for api in unary_api_list:
             x = paddle.rand([])
             x.stop_gradient = False
@@ -83,8 +105,15 @@ def test(self):
             self.assertEqual(x.shape, [])
             self.assertEqual(out.shape, [])
-            self.assertEqual(x.grad.shape, [])
-            self.assertEqual(out.grad.shape, [])
+            if x.grad is not None:
+                self.assertEqual(x.grad.shape, [])
+                self.assertEqual(out.grad.shape, [])
+
+        for api in inplace_api_list:
+            x = paddle.rand([])
+            out = api(x)
+            self.assertEqual(x.shape, [])
+            self.assertEqual(out.shape, [])

         paddle.enable_static()
@@ -107,9 +136,8 @@ def test(self):
 # Use to test zero-dim of reduce API
 class TestReduceAPI(unittest.TestCase):
-    def test(self):
+    def test_dygraph(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         for api in reduce_api_list:
             if api in [paddle.all, paddle.any]:
                 x = paddle.randint(0, 2, []).astype('bool')
@@ -136,9 +164,6 @@ def test(self):
     {'func': paddle.multiply, 'cls_method': '__mul__'},
     {'func': paddle.divide, 'cls_method': '__div__'},
     {'func': paddle.pow, 'cls_method': '__pow__'},
-]
-
-binary_api_list_without_grad = [
     {'func': paddle.equal, 'cls_method': '__eq__'},
     {'func': paddle.not_equal, 'cls_method': '__ne__'},
     {'func': paddle.greater_equal, 'cls_method': '__ge__'},
@@ -153,7 +178,7 @@ def test(self):
     paddle.logical_xor,
 ]

-binary_int_api_list_without_grad = [
+binary_int_api_list = [
     paddle.bitwise_and,
     paddle.bitwise_or,
     paddle.bitwise_xor,
@@ -162,10 +187,9 @@ def test(self):
 # Use to test zero-dim of binary API
 class TestBinaryAPI(unittest.TestCase):
-    def test(self):
+    def test_dygraph_binary(self):
         paddle.disable_static()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        for api in binary_api_list + binary_api_list_without_grad:
+        for api in binary_api_list:
             # 1) x/y is 0D
             x = paddle.rand([])
             y = paddle.rand([])
@@ -177,10 +201,10 @@ def test(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [])
                 self.assertEqual(y.grad.shape, [])
                 self.assertEqual(out.grad.shape, [])
@@ -196,10 +220,10 @@ def test(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [2, 3, 4])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [2, 3, 4])
                 self.assertEqual(y.grad.shape, [])
                 self.assertEqual(out.grad.shape, [2, 3, 4])
@@ -215,10 +239,10 @@ def test(self):
                 np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
             else:
                 out = api(x, y)
-
             self.assertEqual(out.shape, [2, 3, 4])
-            if api not in binary_api_list_without_grad:
-                out.backward()
+
+            out.backward()
+            if x.grad is not None:
                 self.assertEqual(x.grad.shape, [])
                 self.assertEqual(y.grad.shape, [2, 3, 4])
                 self.assertEqual(out.grad.shape, [2, 3, 4])
@@ -231,7 +255,7 @@ def test(self):
                 out = getattr(paddle.Tensor, api['cls_method'])(x, y)
                 self.assertEqual(out.shape, [])

-        for api in binary_int_api_list_without_grad:
+        for api in binary_int_api_list:
             # 1) x/y is 0D
             x = paddle.randint(-10, 10, [])
             y = paddle.randint(-10, 10, [])
@@ -253,8 +277,8 @@ def test(self):
         paddle.enable_static()


-# Use to test zero-dim of Sundry API, which is simple and do
-# not have backward, or is not need to test backward in OpTest.
+# Use to test zero-dim of Sundry API, which is unique and can not be classified
+# with others. It can be implemented here flexibly.
 class TestSundryAPI(unittest.TestCase):
     def setUp(self):
         paddle.disable_static()
@@ -336,6 +360,190 @@ def test_shape(self):
         self.assertEqual(out.shape, [0])
         np.testing.assert_array_equal(out.numpy(), np.array([]))

+    def test_pow_factor(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.pow(x, 2.0)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_cast(self):
+        x = paddle.full([], 1.0, 'float32')
+        x.stop_gradient = False
+        out = paddle.cast(x, 'int32')
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_clip(self):
+        x = paddle.uniform([], None, -10, 10)
+        x.stop_gradient = False
+        out = paddle.clip(x, -5, 5)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_increment(self):
+        x = paddle.rand([])
+        x.stop_gradient = False
+        out = paddle.increment(x, 1.0)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        self.assertEqual(out.grad.shape, [])
+        self.assertEqual(x.grad.shape, [])
+
+    def test_bitwise_not(self):
+        x = paddle.randint(-1, 1, [])
+        out1 = ~x
+        out2 = paddle.bitwise_not(x)
+
+        self.assertEqual(out1.shape, [])
+        self.assertEqual(out2.shape, [])
+
+    def test_logical_not(self):
+        x = paddle.randint(0, 1, [])
+        out = paddle.logical_not(x)
+
+        self.assertEqual(out.shape, [])
+
+
+# Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest.
+class TestNoBackwardAPI(unittest.TestCase):
+    def setUp(self):
+        paddle.disable_static()
+        self.shape = [
+            paddle.full([], 2, 'int32'),
+            paddle.full([], 3, 'int32'),
+            paddle.full([], 4, 'int32'),
+        ]
+
+    def test_slice(self):
+        starts = [paddle.full([], 1, 'int32'), paddle.full([], 1, 'int32')]
+        ends = [paddle.full([], 3, 'int32'), paddle.full([], 3, 'int32')]
+        x = paddle.rand([5, 3, 3])
+        out = paddle.slice(x, [1, 2], starts, ends)
+        self.assertEqual(out.shape, [5, 2, 2])
+
+    def test_strided_slice(self):
+        starts = [paddle.full([], 0, 'int32'), paddle.full([], 0, 'int32')]
+        ends = [paddle.full([], 4, 'int32'), paddle.full([], 4, 'int32')]
+        strides = [paddle.full([], 2, 'int32'), paddle.full([], 2, 'int32')]
+        x = paddle.rand([5, 5, 5])
+        out = paddle.strided_slice(x, [1, 2], starts, ends, strides)
+        self.assertEqual(out.shape, [5, 2, 2])
+
+    def test_linspace(self):
+        start = paddle.full([], 1.0)
+        stop = paddle.full([], 5.0)
+        num = paddle.full([], 5, 'int32')
+        out = paddle.linspace(start, stop, num)
+        np.testing.assert_array_equal(out.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])
+
+    def test_arange(self):
+        start = paddle.full([], 1.0)
+        stop = paddle.full([], 6.0)
+        step = paddle.full([], 1.0)
+        out = paddle.arange(start, stop, step)
+        np.testing.assert_array_equal(out.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])
+
+    def test_normal(self):
+        mean = paddle.full([], 0.0)
+        std = paddle.full([], 0.0)
+        out = paddle.normal(mean, std)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.normal(0.0, 1.0, [])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.normal(0.0, 1.0, self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_rand(self):
+        out = paddle.rand([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.rand(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_randn(self):
+        out = paddle.randn([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.randn(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_randint_and_randint_like(self):
+        out = paddle.randint(-10, 10, [])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.randint_like(out, -10, 10)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.randint(-10, 10, self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_standard_normal(self):
+        out = paddle.standard_normal([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.standard_normal(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_uniform(self):
+        out = paddle.uniform([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.uniform(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_empty_and_empty_like(self):
+        out = paddle.empty([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.empty_like(out)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.empty(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_full_and_full_like(self):
+        out = paddle.full([], 0.5)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.full_like(out, 0.5)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.full(self.shape, 0.5)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_ones_and_ones_like(self):
+        out = paddle.ones([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.ones_like(out)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.ones(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+
+    def test_zeros_and_zeros_like(self):
+        out = paddle.zeros([])
+        self.assertEqual(out.shape, [])
+
+        out = paddle.zeros_like(out)
+        self.assertEqual(out.shape, [])
+
+        out = paddle.zeros(self.shape)
+        self.assertEqual(out.shape, [2, 3, 4])
+

 if __name__ == "__main__":
     unittest.main()
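For reference, below is a minimal standalone sketch (not part of the patch) of the zero-dim behaviour these tests exercise. It assumes a Paddle build that supports 0-D tensors and uses only public APIs that already appear in the diff; the file name and the choice of paddle.tanh are illustrative, and the grad-shape caveat simply mirrors the TODO(zhouwei) comments in the tests.

# demo_zero_dim.py -- illustrative sketch only, not part of the patch
import paddle

# Dygraph: a 0-D tensor has shape [] and, with this flag set, the gradient
# of a non-leaf output is retained and is also 0-D.
paddle.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
x = paddle.rand([])
x.stop_gradient = False
out = paddle.tanh(x)
out.backward()
print(x.shape, out.shape)            # [] []
print(x.grad.shape, out.grad.shape)  # [] []

# Static graph: the compile-time shape of a 0-D variable is (), and the
# fetched runtime values are also 0-D numpy arrays. Per the TODO(zhouwei)
# comments above, the gradient created by append_backward is still
# materialized with shape (1,) until the kernel is fixed.
paddle.enable_static()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.rand([])
    x.stop_gradient = False
    out = paddle.tanh(x)
    paddle.static.append_backward(out)
    exe = paddle.static.Executor()
    res = exe.run(main_prog, fetch_list=[x, out])
    print(res[0].shape, res[1].shape)  # () ()
paddle.disable_static()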