From 10b16ad7ba20912840d66fe046f96f7742647ddf Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Sun, 19 May 2024 08:16:12 +0000 Subject: [PATCH 01/10] add cumprod_grad composite --- paddle/fluid/prim/api/api.yaml | 1 + .../composite_backward_api.h | 31 +++++ paddle/fluid/primitive/rule/vjp/details.h | 25 ++++ paddle/phi/api/yaml/backward.yaml | 1 + .../vjp/eager/test_comp_eager_cumprod_grad.py | 76 +++++++++++ .../prim/vjp/static/test_comp_cumprod_grad.py | 123 ++++++++++++++++++ 6 files changed, 257 insertions(+) create mode 100644 test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py create mode 100644 test/prim/prim/vjp/static/test_comp_cumprod_grad.py diff --git a/paddle/fluid/prim/api/api.yaml b/paddle/fluid/prim/api/api.yaml index 40d4b4a4ae69c..a951ed4431a57 100644 --- a/paddle/fluid/prim/api/api.yaml +++ b/paddle/fluid/prim/api/api.yaml @@ -29,6 +29,7 @@ - maximum - minimum - prod +- cumprod - roll - scatter - scatter_nd_add diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h index 148f249cc4c3d..9835b9ea70799 100644 --- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h @@ -1071,6 +1071,37 @@ void gather_nd_grad(const Tensor& x, } } +template +void cumprod_grad(const Tensor& x, + const Tensor& out, + const Tensor& out_grad, + int dim, + bool exclusive, + bool reverse, + Tensor* x_grad) { + if (x_grad) { + // dx = cumsum(out * out_grad, dim, false, exclusive, !reverse) / x + std::vector x_dim = common::vectorize(x.dims()); + // std::cout<<"start cumprod 1\n"; + auto zero_tensor = full(x_dim, 0.0, x.dtype()); + auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); + // std::cout<<"start cumprod 2\n"; + auto common_dx = + cumsum(out * out_grad, dim, false, exclusive, !reverse) / x; + // std::cout<<"start cumprod 3\n"; + auto ones_tensor = full(x_dim, 1.0, x.dtype()); + auto replace_one = (1 - zero_mask) * x + zero_mask * ones_tensor; + // std::cout<<"start cumprod 4\n"; + auto cumprod_recompute = cumprod(replace_one, dim, exclusive, reverse); + auto zeros_dx = cumsum( + cumprod_recompute * out_grad, dim, false, exclusive, !reverse); + // std::cout<<"start cumprod 5\n"; + auto x_grad_res = (1 - zero_mask) * common_dx + zero_mask * zeros_dx; + // std::cout<<"start cumprod 6\n"; + set_output(x_grad_res, x_grad); + } +} + template void prod_grad(const Tensor& x, const Tensor& out, diff --git a/paddle/fluid/primitive/rule/vjp/details.h b/paddle/fluid/primitive/rule/vjp/details.h index bc460be6f832a..0d136c7a022d2 100644 --- a/paddle/fluid/primitive/rule/vjp/details.h +++ b/paddle/fluid/primitive/rule/vjp/details.h @@ -59,6 +59,31 @@ void cumsum_grad(const Tensor& x, } } +template +void cumprod_grad(const Tensor& x, + const Tensor& out, + const Tensor& out_grad, + int dim, + bool exclusive, + bool reverse, + Tensor* x_grad) { + if (x_grad) { + // dx = cumsum(out * out_grad, dim, false, exclusive, !reverse) / x + std::vector x_dim = common::vectorize(x.dims()); + auto zero_tensor = full(x_dim, 0.0, x.dtype()); + auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); + auto common_dx = + cumsum(out * out_grad, dim, false, exclusive, !reverse) / x; + auto ones_tensor = full(x_dim, 1.0, x.dtype()); + auto replace_one = (1 - zero_mask) * x + zero_mask * ones_tensor; + auto cumprod_recompute = cumprod(replace_one, dim, exclusive, reverse); + auto zeros_dx = cumsum( + cumprod_recompute * 
out_grad, dim, false, exclusive, !reverse); + auto x_grad_res = (1 - zero_mask) * common_dx + zero_mask * zeros_dx; + set_output(x_grad_res, x_grad); + } +} + template void divide_grad(const Tensor& x, const Tensor& y, diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index 1946306857f7f..428f79fd8b930 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -572,6 +572,7 @@ param: [x] kernel : func : cumprod_grad + composite: cumprod_grad(x, out, out_grad, dim, exclusive, reverse, x_grad) - backward_op : cumsum_grad forward : cumsum(Tensor x, Scalar axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) -> Tensor(out) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py new file mode 100644 index 0000000000000..2925f74f51377 --- /dev/null +++ b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import parameterized as param + +import paddle +from paddle.base import core + + +@param.parameterized_class( + ('primal', 'dtype'), + [ + ( + np.random.rand(2, 3, 4), + np.float32, + ), + ( + np.random.rand(2, 3, 3, 4), + np.float32, + ), + ], +) +class TestCumprodGradComp(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.primal = cls.primal.astype(cls.dtype) + + def test_cumprod_grad_comp(self): + def actual(primal, dim): + paddle.disable_static() + core.set_prim_eager_enabled(True) + x = paddle.to_tensor(primal, dtype='float32', stop_gradient=False) + x.stop_gradient = False + y = paddle.cumprod(x, dim=dim) + x_cotangent = paddle.grad( + y, x, create_graph=True, retain_graph=True + ) + return x_cotangent[0] + + def desired(primal, dim): + paddle.disable_static() + core.set_prim_eager_enabled(False) + x = paddle.to_tensor(primal, dtype='float32', stop_gradient=False) + x.stop_gradient = False + y = paddle.cumprod(x, dim=dim) + x_cotangent = paddle.grad( + y, x, create_graph=False, retain_graph=True + ) + return x_cotangent[0] + + for i in range(len(self.primal.shape)): + np.testing.assert_allclose( + actual=actual(self.primal, i), + desired=desired(self.primal, i), + rtol=1e-6, + atol=0, + ) + core.set_prim_eager_enabled(False) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py new file mode 100644 index 0000000000000..be5f67d1ee1d4 --- /dev/null +++ b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from paddle.base import core + +core._set_prim_backward_enabled(True) + +import numpy as np +import parameterized as param + +import paddle + + +def apply_to_static(net, use_cinn): + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = use_cinn + return paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + + +class PrimeNet(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.fc = paddle.nn.Linear(4, 4) + + def forward(self, x): + tmp = self.fc(x) + out = paddle.cumprod(tmp, -1) + return out + + +@param.parameterized_class( + ('primal', 'cotangent', 'dtype'), + [ + (np.random.rand(10, 10), np.random.rand(10, 10), np.float32), + ], +) +class TestCumprodGradComp(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.primal = cls.primal.astype(cls.dtype) + cls.cotangent = cls.cotangent.astype(cls.dtype) + + def train(self, use_prim, use_cinn): + paddle.seed(2022) + self.x = paddle.randn([2, 4]) + self.x.stop_gradient = False + net = PrimeNet() + core._set_prim_backward_enabled(use_prim) + net = apply_to_static(net, use_cinn) + out = net(self.x) + res = paddle.autograd.grad(out, [self.x]) + + return res + + def test_tanh_grad_comp(self): + paddle.enable_static() + + def actual(primal, cotangent, dim): + core._set_prim_backward_enabled(True) + mp, sp = paddle.static.Program(), paddle.static.Program() + with paddle.static.program_guard(mp, sp): + x = paddle.static.data('primal', primal.shape, primal.dtype) + x.stop_gradient = False + v = paddle.static.data( + 'cotangent', cotangent.shape, cotangent.dtype + ) + y = paddle.cumprod(x, dim) + x_cotangent = paddle.static.gradients(y, x, v) + exe = paddle.static.Executor() + exe.run(sp) + return exe.run( + program=mp, + feed={'primal': primal, 'cotangent': cotangent}, + fetch_list=[x_cotangent[0]], + )[0] + + def desired(primal, cotangent, dim): + core._set_prim_backward_enabled(False) + mp, sp = paddle.static.Program(), paddle.static.Program() + with paddle.static.program_guard(mp, sp): + x = paddle.static.data('primal', primal.shape, primal.dtype) + x.stop_gradient = False + v = paddle.static.data( + 'cotangent', cotangent.shape, cotangent.dtype + ) + y = paddle.cumprod(x, dim) + x_cotangent = paddle.static.gradients(y, x, v) + exe = paddle.static.Executor() + exe.run(sp) + return exe.run( + program=mp, + feed={'primal': primal, 'cotangent': cotangent}, + fetch_list=[x_cotangent[0]], + )[0] + + for i in range(len(self.primal.shape)): + np.testing.assert_allclose( + actual=actual(self.primal, self.cotangent, i), + desired=desired(self.primal, self.cotangent, i), + rtol=1e-6, + atol=0, + ) + core._set_prim_backward_enabled(False) + paddle.disable_static() + + +if __name__ == '__main__': + unittest.main() From d37962e2f7c70b2d4bd3232f5330940c93d2ffd9 Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Sun, 19 May 2024 13:21:43 +0000 Subject: [PATCH 02/10] remove cout --- .../prim/api/composite_backward/composite_backward_api.h | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h index 9835b9ea70799..5fee155d06db0 100644 --- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h @@ -1082,22 +1082,16 @@ void cumprod_grad(const Tensor& x, if (x_grad) { // dx = cumsum(out * out_grad, dim, false, exclusive, !reverse) / x std::vector x_dim = common::vectorize(x.dims()); - // std::cout<<"start cumprod 1\n"; auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); - // std::cout<<"start cumprod 2\n"; auto common_dx = cumsum(out * out_grad, dim, false, exclusive, !reverse) / x; - // std::cout<<"start cumprod 3\n"; auto ones_tensor = full(x_dim, 1.0, x.dtype()); auto replace_one = (1 - zero_mask) * x + zero_mask * ones_tensor; - // std::cout<<"start cumprod 4\n"; auto cumprod_recompute = cumprod(replace_one, dim, exclusive, reverse); auto zeros_dx = cumsum( cumprod_recompute * out_grad, dim, false, exclusive, !reverse); - // std::cout<<"start cumprod 5\n"; auto x_grad_res = (1 - zero_mask) * common_dx + zero_mask * zeros_dx; - // std::cout<<"start cumprod 6\n"; set_output(x_grad_res, x_grad); } } From db81952ba5ca5021438a5a5fc69b7b285356643c Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Mon, 20 May 2024 14:31:27 +0000 Subject: [PATCH 03/10] update test and fix some bug --- .../composite_backward_api.h | 20 ++++++--- .../vjp/eager/test_comp_eager_cumprod_grad.py | 41 +++++++++++++++---- .../prim/vjp/static/test_comp_cumprod_grad.py | 41 +++++++++++++++---- 3 files changed, 83 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h index 5fee155d06db0..988769c8ba53c 100644 --- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h @@ -1084,14 +1084,24 @@ void cumprod_grad(const Tensor& x, std::vector x_dim = common::vectorize(x.dims()); auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); - auto common_dx = - cumsum(out * out_grad, dim, false, exclusive, !reverse) / x; + auto zero_mask_cumsum1 = cumsum(zero_mask, dim, false, false, reverse); + auto zero_mask_cumsum2 = cumsum(zero_mask, dim, false, true, reverse); + auto zero_mask_cumsum = + zero_mask_cumsum1 + + zero_mask_cumsum2; // determine the index of first zero auto ones_tensor = full(x_dim, 1.0, x.dtype()); - auto replace_one = (1 - zero_mask) * x + zero_mask * ones_tensor; - auto cumprod_recompute = cumprod(replace_one, dim, exclusive, reverse); + auto first_zero_mask = + cast(equal(zero_mask_cumsum, ones_tensor), x.dtype()); + auto common_dx = cumsum(out * out_grad, dim, false, exclusive, !reverse); + auto replace_one = (1 - zero_mask) * x + zero_mask; + auto replace_first_one = (1 - first_zero_mask) * x + first_zero_mask; + auto cumprod_recompute = + cumprod(replace_first_one, dim, exclusive, reverse); auto zeros_dx = cumsum( cumprod_recompute * out_grad, dim, false, exclusive, !reverse); - auto x_grad_res = (1 - zero_mask) * common_dx + zero_mask * zeros_dx; + auto x_grad_res = + ((1 - first_zero_mask) * common_dx + first_zero_mask * zeros_dx) / + replace_one; set_output(x_grad_res, x_grad); } } diff --git a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py 
b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py index 2925f74f51377..bb8ece76c3315 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import random import unittest import numpy as np @@ -24,6 +25,16 @@ @param.parameterized_class( ('primal', 'dtype'), [ + ( + np.random.rand( + 100, + ), + np.float32, + ), + ( + np.random.rand(10, 10), + np.float32, + ), ( np.random.rand(2, 3, 4), np.float32, @@ -32,12 +43,21 @@ np.random.rand(2, 3, 3, 4), np.float32, ), + ( + np.random.rand(2, 3, 3, 4, 5), + np.float32, + ), + ( + np.random.randint(1, 100, (2, 3, 4)), + np.int64, + ), ], ) class TestCumprodGradComp(unittest.TestCase): @classmethod def setUpClass(cls): cls.primal = cls.primal.astype(cls.dtype) + cls.zero_nums = [0, 1, 10, int(np.prod(cls.primal.shape))] def test_cumprod_grad_comp(self): def actual(primal, dim): @@ -62,13 +82,20 @@ def desired(primal, dim): ) return x_cotangent[0] - for i in range(len(self.primal.shape)): - np.testing.assert_allclose( - actual=actual(self.primal, i), - desired=desired(self.primal, i), - rtol=1e-6, - atol=0, - ) + for zero_num in self.zero_nums: + shape = self.primal.shape + x = self.primal.flatten() + indices = random.sample(range(x.size), zero_num) + for i in indices: + x[i] = 0 + x = np.reshape(x, shape) + for i in range(len(self.primal.shape)): + np.testing.assert_allclose( + actual=actual(x, i), + desired=desired(x, i), + rtol=1e-6, + atol=0, + ) core.set_prim_eager_enabled(False) diff --git a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py index be5f67d1ee1d4..5b6012d9b5242 100644 --- a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py +++ b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py @@ -18,6 +18,8 @@ core._set_prim_backward_enabled(True) +import random + import numpy as np import parameterized as param @@ -46,7 +48,24 @@ def forward(self, x): @param.parameterized_class( ('primal', 'cotangent', 'dtype'), [ + ( + np.random.rand( + 100, + ), + np.random.rand( + 100, + ), + np.float32, + ), (np.random.rand(10, 10), np.random.rand(10, 10), np.float32), + (np.random.rand(3, 4, 5), np.random.rand(3, 4, 5), np.float32), + (np.random.rand(2, 3, 4, 5), np.random.rand(2, 3, 4, 5), np.float32), + ( + np.random.rand(2, 3, 2, 4, 5), + np.random.rand(2, 3, 2, 4, 5), + np.float32, + ), + (np.random.randint(1, 20, (10, 10)), np.random.rand(10, 10), np.int64), ], ) class TestCumprodGradComp(unittest.TestCase): @@ -54,6 +73,7 @@ class TestCumprodGradComp(unittest.TestCase): def setUpClass(cls): cls.primal = cls.primal.astype(cls.dtype) cls.cotangent = cls.cotangent.astype(cls.dtype) + cls.zero_nums = [0, 1, 10, int(np.prod(cls.primal.shape))] def train(self, use_prim, use_cinn): paddle.seed(2022) @@ -108,13 +128,20 @@ def desired(primal, cotangent, dim): fetch_list=[x_cotangent[0]], )[0] - for i in range(len(self.primal.shape)): - np.testing.assert_allclose( - actual=actual(self.primal, self.cotangent, i), - desired=desired(self.primal, self.cotangent, i), - rtol=1e-6, - atol=0, - ) + for zero_num in self.zero_nums: + shape = self.primal.shape + x = self.primal.flatten() + indices = random.sample(range(x.size), zero_num) + for i in indices: + x[i] = 0 + x = np.reshape(x, shape) + for i in range(len(self.primal.shape)): + np.testing.assert_allclose( + actual=actual(x, self.cotangent, i), + 
desired=desired(x, self.cotangent, i), + rtol=1e-6, + atol=0, + ) core._set_prim_backward_enabled(False) paddle.disable_static() From d23d6d39760d39b61a10eab68f61bea12ef642db Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Tue, 21 May 2024 13:53:16 +0000 Subject: [PATCH 04/10] update test --- test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py | 4 +--- test/prim/prim/vjp/static/CMakeLists.txt | 1 + test/prim/prim/vjp/static/test_comp_cumprod_grad.py | 8 ++------ 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py index bb8ece76c3315..9e39e609a9823 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py @@ -26,9 +26,7 @@ ('primal', 'dtype'), [ ( - np.random.rand( - 100, - ), + np.random.uniform(1, 5, (50,)), np.float32, ), ( diff --git a/test/prim/prim/vjp/static/CMakeLists.txt b/test/prim/prim/vjp/static/CMakeLists.txt index a29e094a17f05..a7985bf97aa85 100644 --- a/test/prim/prim/vjp/static/CMakeLists.txt +++ b/test/prim/prim/vjp/static/CMakeLists.txt @@ -16,3 +16,4 @@ set_tests_properties(test_comp_add_grad PROPERTIES TIMEOUT 60) set_tests_properties(test_comp_sub_grad PROPERTIES TIMEOUT 60) set_tests_properties(test_comp_add_tanh_grad PROPERTIES TIMEOUT 60) set_tests_properties(test_comp_sqrt_grad PROPERTIES TIMEOUT 60) +set_tests_properties(test_comp_cumprod_grad PROPERTIES TIMEOUT 150) diff --git a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py index 5b6012d9b5242..ab13f19ddc87e 100644 --- a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py +++ b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py @@ -49,12 +49,8 @@ def forward(self, x): ('primal', 'cotangent', 'dtype'), [ ( - np.random.rand( - 100, - ), - np.random.rand( - 100, - ), + np.random.uniform(1, 5, (50,)), + np.random.uniform(1, 5, (50,)), np.float32, ), (np.random.rand(10, 10), np.random.rand(10, 10), np.float32), From a0b8dc0d391b1a54478be64e57f89b137c96d36c Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Tue, 21 May 2024 14:54:30 +0000 Subject: [PATCH 05/10] add comment and test --- .../composite_backward_api.h | 14 +- paddle/fluid/primitive/rule/vjp/details.h | 26 +- paddle/phi/kernels/cpu/cumprod_kernel.cc | 3 +- test/deprecated/legacy_test/test_reduce_op.py | 3236 +++++++++-------- .../vjp/eager/test_comp_eager_cumprod_grad.py | 46 + .../prim/vjp/static/test_comp_cumprod_grad.py | 82 +- 6 files changed, 1779 insertions(+), 1628 deletions(-) diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h index 988769c8ba53c..7d836fa0f8dc6 100644 --- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h @@ -1084,17 +1084,23 @@ void cumprod_grad(const Tensor& x, std::vector x_dim = common::vectorize(x.dims()); auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); - auto zero_mask_cumsum1 = cumsum(zero_mask, dim, false, false, reverse); - auto zero_mask_cumsum2 = cumsum(zero_mask, dim, false, true, reverse); + // determine the index of first zero + auto zero_mask_cumsum_inclusive = + cumsum(zero_mask, dim, false, false, reverse); + auto zero_mask_cumsum_exclusive = + cumsum(zero_mask, dim, false, 
true, reverse); auto zero_mask_cumsum = - zero_mask_cumsum1 + - zero_mask_cumsum2; // determine the index of first zero + zero_mask_cumsum_inclusive + zero_mask_cumsum_exclusive; auto ones_tensor = full(x_dim, 1.0, x.dtype()); auto first_zero_mask = cast(equal(zero_mask_cumsum, ones_tensor), x.dtype()); + // compute the grad for position with value not equal to 0 auto common_dx = cumsum(out * out_grad, dim, false, exclusive, !reverse); + // fill the positions of 0 with 1. auto replace_one = (1 - zero_mask) * x + zero_mask; + // fill the first positions of 0 with 1. auto replace_first_one = (1 - first_zero_mask) * x + first_zero_mask; + // recompute the grad of the first position with 0 auto cumprod_recompute = cumprod(replace_first_one, dim, exclusive, reverse); auto zeros_dx = cumsum( diff --git a/paddle/fluid/primitive/rule/vjp/details.h b/paddle/fluid/primitive/rule/vjp/details.h index 630c8515db938..13f3f0e878fe8 100644 --- a/paddle/fluid/primitive/rule/vjp/details.h +++ b/paddle/fluid/primitive/rule/vjp/details.h @@ -72,14 +72,30 @@ void cumprod_grad(const Tensor& x, std::vector x_dim = common::vectorize(x.dims()); auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); - auto common_dx = - cumsum(out * out_grad, dim, false, exclusive, !reverse) / x; + // determine the index of first zero + auto zero_mask_cumsum_inclusive = + cumsum(zero_mask, dim, false, false, reverse); + auto zero_mask_cumsum_exclusive = + cumsum(zero_mask, dim, false, true, reverse); + auto zero_mask_cumsum = + zero_mask_cumsum_inclusive + zero_mask_cumsum_exclusive; auto ones_tensor = full(x_dim, 1.0, x.dtype()); - auto replace_one = (1 - zero_mask) * x + zero_mask * ones_tensor; - auto cumprod_recompute = cumprod(replace_one, dim, exclusive, reverse); + auto first_zero_mask = + cast(equal(zero_mask_cumsum, ones_tensor), x.dtype()); + // compute the grad for position with value not equal to 0 + auto common_dx = cumsum(out * out_grad, dim, false, exclusive, !reverse); + // fill the positions of 0 with 1. + auto replace_one = (1 - zero_mask) * x + zero_mask; + // fill the first positions of 0 with 1. 
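    // A short sketch of why this composite rule works (indices along `dim`,
    // shown for the inclusive, non-reversed case): for j >= i with x_i != 0,
    //   d(out_j) / d(x_i) = out_j / x_i,
    // so x_grad_i = (sum_{j >= i} out_j * out_grad_j) / x_i. The reversed
    // cumulative sum is `common_dx` above, and the division by x is applied at
    // the end through `replace_one`. At a zero of x this quotient is undefined;
    // only the first zero along `dim` can still receive gradient, and it is
    // recovered by recomputing the cumprod with that single element replaced
    // by 1 (`zeros_dx` below). For example, x = [2, 0, 3] with
    // out_grad = [1, 1, 1] gives out = [2, 0, 0] and x_grad = [1, 8, 0],
    // where 8 = 2 + 2 * 3 comes from the recomputed products [2, 2, 6].
    // The next statement builds that "first zero -> 1" replacement: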
+ auto replace_first_one = (1 - first_zero_mask) * x + first_zero_mask; + // recompute the grad of the first position with 0 + auto cumprod_recompute = + cumprod(replace_first_one, dim, exclusive, reverse); auto zeros_dx = cumsum( cumprod_recompute * out_grad, dim, false, exclusive, !reverse); - auto x_grad_res = (1 - zero_mask) * common_dx + zero_mask * zeros_dx; + auto x_grad_res = + ((1 - first_zero_mask) * common_dx + first_zero_mask * zeros_dx) / + replace_one; set_output(x_grad_res, x_grad); } } diff --git a/paddle/phi/kernels/cpu/cumprod_kernel.cc b/paddle/phi/kernels/cpu/cumprod_kernel.cc index f39bddbb443ba..e3661ceefd14f 100644 --- a/paddle/phi/kernels/cpu/cumprod_kernel.cc +++ b/paddle/phi/kernels/cpu/cumprod_kernel.cc @@ -34,7 +34,8 @@ void CumprodKernel(const Context& dev_ctx, auto* x_data = x->data(); auto* out_data = dev_ctx.template Alloc(out); DDim shape = x->dims(); - + std::cout << "x_data addr: " << x_data << "\n"; + std::cout << "out_data addr: " << out_data << "\n"; size_t outer_dim = 1; size_t mid_dim = 1; size_t inner_dim = 1; diff --git a/test/deprecated/legacy_test/test_reduce_op.py b/test/deprecated/legacy_test/test_reduce_op.py index ce74b1423eab4..e899a864246e7 100644 --- a/test/deprecated/legacy_test/test_reduce_op.py +++ b/test/deprecated/legacy_test/test_reduce_op.py @@ -12,531 +12,532 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import unittest import numpy as np -from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci -from utils import static_guard + +sys.path.append("../../legacy_test") +from op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import base from paddle.base import core -from paddle.base.framework import convert_np_dtype_to_dtype_, in_pir_mode -from paddle.pir_utils import test_with_pir_api - - -class TestSumOp(OpTest): - def setUp(self): - self.init_dtype() - self.init_input() - self.init_attrs() - self.calc_output() - - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.op_type = "reduce_sum" - self.prim_op_type = "prim" - self.inputs = {'X': self.x} - self.outputs = {'Out': self.out} - self.if_enable_cinn() - - def init_dtype(self): - self.dtype = np.float64 - - def init_input(self): - self.x = np.random.random((5, 6, 10)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': [0]} - - def if_enable_cinn(self): - pass - - def calc_output(self): - self.out = self.x.sum(axis=tuple(self.attrs['dim'])) - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - self.check_grad( - ['X'], - 'Out', - check_prim=True, - check_pir=True, - check_prim_pir=True, - ) - - -class TestComplexSumOP(TestSumOp): - def init_dtype(self): - self.dtype = np.complex128 - - def init_input(self): - self.x = np.random.random((3, 4)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': [0]} - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=False) - - -class TestSumOp_ZeroDim(TestSumOp): - def init_attrs(self): - self.attrs = {'dim': []} - - def init_input(self): - self.x = np.random.random([]).astype(self.dtype) - - def calc_output(self): - self.out = self.x.sum(axis=None) - - def test_check_grad(self): - self.check_grad( - ['X'], - 'Out', - check_pir=True, - check_prim=True, - check_prim_pir=True, - ) - - -class TestSumOp5D(TestSumOp): - def init_input(self): - self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype) - - def 
init_attrs(self): - self.attrs = {'dim': [0]} - - -class TestSumOp6D(TestSumOp): - def init_input(self): - self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': [0]} - - -class TestSumOp8D(TestSumOp): - def init_input(self): - self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': (0, 3)} - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_pir=True) - - -class TestSumOp_withInt(TestSumOp): - def init_input(self): - # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format - # Precision limitations on integer values between 0 and 2048 can be exactly represented - self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': (0, 1)} - - def test_check_output(self): - self.check_output(check_pir=True) - - def calc_gradient(self): - x = self.inputs["X"] - grad = np.ones(x.shape, dtype=x.dtype) - return (grad,) - - def test_check_grad(self): - self.check_grad( - ['X'], - 'Out', - user_defined_grads=self.calc_gradient(), - check_prim=True, - check_prim_pir=True, - check_pir=True, - ) - - -class TestSumOp3Dim(TestSumOp): - def init_input(self): - self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype) - - def init_attrs(self): - self.attrs = {'dim': (0, 1, 2)} - - def test_check_output(self): - self.check_output(check_pir=True) - - def calc_gradient(self): - x = self.inputs["X"] - grad = np.ones(x.shape, dtype=x.dtype) - return (grad,) - - def test_check_grad(self): - self.check_grad( - ['X'], - 'Out', - user_defined_grads=self.calc_gradient(), - check_prim=True, - check_prim_pir=True, - check_pir=True, - ) - - -def create_test_fp16_class(parent): - @unittest.skipIf( - not core.is_compiled_with_cuda(), "core is not compiled with CUDA" - ) - class TestSumOpFp16(parent): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - self.check_grad( - ['X'], - 'Out', - check_prim=True, - check_prim_pir=True, - check_pir=True, - ) - - -create_test_fp16_class(TestSumOp) -create_test_fp16_class(TestSumOp_ZeroDim) -create_test_fp16_class(TestSumOp5D) -create_test_fp16_class(TestSumOp6D) -create_test_fp16_class(TestSumOp8D) -create_test_fp16_class(TestSumOp_withInt) -create_test_fp16_class(TestSumOp3Dim) - - -def create_test_bf16_class(parent): - @unittest.skipIf( - not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(), - "core is not compiled with CUDA", - ) - class TestSumOpBf16(parent): - def setUp(self): - self.inputs = {'X': convert_float_to_uint16(self.x)} - self.outputs = {'Out': convert_float_to_uint16(self.out)} - self.enable_cinn = False - - def init_dtype(self): - self.dtype = np.uint16 - - def test_check_output(self): - place = core.CUDAPlace(0) - self.check_output_with_place(place, check_pir=True) - - def test_check_grad(self): - place = core.CUDAPlace(0) - self.check_grad_with_place( - place, - ['X'], - 'Out', - user_defined_grads=self.gradient, - check_prim=True, - check_prim_pir=True, - check_pir=True, - ) - - def calc_gradient(self): - x = self.x - grad = np.ones(x.shape, dtype=x.dtype) - return [grad] - - -create_test_bf16_class(TestSumOp) -create_test_bf16_class(TestSumOp_ZeroDim) -create_test_bf16_class(TestSumOp5D) -create_test_bf16_class(TestSumOp6D) -create_test_bf16_class(TestSumOp8D) 
-create_test_bf16_class(TestSumOp_withInt) -create_test_bf16_class(TestSumOp3Dim) - - -@skip_check_grad_ci( - reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework." -) -class TestMaxOp(OpTest): - """Remove Max with subgradient from gradient check to confirm the success of CI.""" - - def setUp(self): - self.op_type = "reduce_max" - self.prim_op_type = "prim" - self.python_api = paddle.max - self.public_python_api = paddle.max - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [-1]} - self.outputs = { - 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) - } - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - # only composite op support gradient check of reduce_max - self.check_grad( - ['X'], - 'Out', - check_prim=True, - only_check_prim=True, - check_pir=True, - ) - - -class TestMaxOp_ZeroDim(OpTest): - """Remove Max with subgradient from gradient check to confirm the success of CI.""" - - def setUp(self): - self.op_type = "reduce_max" - self.prim_op_type = "prim" - self.python_api = paddle.max - self.public_python_api = paddle.max - self.if_enable_cinn() - self.init_inputs_and_outputs() - - def if_enable_cinn(self): - self.enable_cinn = False - - def init_inputs_and_outputs(self): - self.inputs = {'X': np.random.random([]).astype("float64")} - self.attrs = {'dim': []} - self.outputs = { - 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) - } - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - # only composite op support gradient check of reduce_max - self.check_grad( - ['X'], - 'Out', - check_prim=True, - only_check_prim=True, - check_pir=True, - ) - - -class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim): - def init_inputs_and_outputs(self): - self.inputs = {'X': np.random.random([5]).astype("float64")} - self.attrs = {'dim': [0]} - self.outputs = {'Out': self.inputs['X'].max(axis=(0,))} - - -class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1): - def init_inputs_and_outputs(self): - self.inputs = {'X': np.random.random([5, 20]).astype("float64")} - self.attrs = {'dim': [0, 1]} - self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))} - -class TestMaxFP32Op(OpTest): - """Remove Max with subgradient from gradient check to confirm the success of CI.""" +paddle.set_flags({"FLAGS_pir_apply_inplace_pass": False}) - def setUp(self): - self.op_type = "reduce_max" - self.prim_op_type = "prim" - self.python_api = paddle.max - self.public_python_api = paddle.max - self.init_dtype() - self.if_enable_cinn() - if self.dtype == np.uint16: - x = np.random.random((5, 6, 10)).astype(np.float32) - self.inputs = {'X': convert_float_to_uint16(x)} - else: - x = np.random.random((5, 6, 10)).astype(self.dtype) - self.inputs = {'X': x} - self.attrs = {'dim': [-1], 'keep_dim': True} - out = x.max(axis=tuple(self.attrs['dim']), keepdims=True) - if self.dtype == np.uint16: - self.outputs = {'Out': convert_float_to_uint16(out)} - else: - self.outputs = {'Out': out} - def if_enable_cinn(self): - pass +# class TestSumOp(OpTest): +# def setUp(self): +# self.init_dtype() +# self.init_input() +# self.init_attrs() +# self.calc_output() - def test_check_output(self): - self.check_output(check_pir=True) +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.op_type = "reduce_sum" +# self.prim_op_type = "prim" +# self.inputs = {'X': self.x} +# self.outputs = {'Out': self.out} +# 
self.if_enable_cinn() - def test_check_grad(self): - # only composite op support gradient check of reduce_max - self.check_grad( - ['X'], - 'Out', - check_prim=True, - only_check_prim=True, - check_pir=True, - ) +# def init_dtype(self): +# self.dtype = np.float64 - def init_dtype(self): - self.dtype = np.float32 +# def init_input(self): +# self.x = np.random.random((5, 6, 10)).astype(self.dtype) +# def init_attrs(self): +# self.attrs = {'dim': [0]} -class TestMaxFP16Op(TestMaxFP32Op): - def init_dtype(self): - self.dtype = np.float16 +# def if_enable_cinn(self): +# pass +# def calc_output(self): +# self.out = self.x.sum(axis=tuple(self.attrs['dim'])) -@unittest.skipIf( - not core.is_compiled_with_cuda() - or paddle.is_compiled_with_rocm() - or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA or not support the bfloat16", -) -class TestMaxBF16Op(TestMaxFP32Op): - def init_dtype(self): - self.dtype = np.uint16 +# def test_check_output(self): +# self.check_output(check_pir=True) - def if_enable_cinn(self): - self.enable_cinn = False +# def test_check_grad(self): +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# check_pir=True, +# check_prim_pir=True, +# ) - def test_check_output(self): - self.check_output_with_place(core.CUDAPlace(0), check_pir=True) - def test_check_grad(self): - # only composite op support gradient check of reduce_max - self.check_grad_with_place( - core.CUDAPlace(0), - ['X'], - 'Out', - check_prim=True, - only_check_prim=True, - check_pir=True, - ) +# class TestComplexSumOP(TestSumOp): +# def init_dtype(self): +# self.dtype = np.complex128 +# def init_input(self): +# self.x = np.random.random((3, 4)).astype(self.dtype) -@skip_check_grad_ci( - reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework." 
-) -class TestMinOp(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" +# def init_attrs(self): +# self.attrs = {'dim': [0]} - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [2]} - self.outputs = { - 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) - } +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=False) - def test_check_output(self): - self.check_output(check_pir=True) +# class TestSumOp_ZeroDim(TestSumOp): +# def init_attrs(self): +# self.attrs = {'dim': []} -class TestMinOp_ZeroDim(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" +# def init_input(self): +# self.x = np.random.random([]).astype(self.dtype) - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.inputs = {'X': np.random.random([]).astype("float64")} - self.attrs = {'dim': []} - self.outputs = { - 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) - } +# def calc_output(self): +# self.out = self.x.sum(axis=None) - def test_check_output(self): - self.check_output(check_pir=True) +# def test_check_grad(self): +# self.check_grad( +# ['X'], +# 'Out', +# check_pir=True, +# check_prim=True, +# check_prim_pir=True, +# ) -class TestMin6DOp(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" +# class TestSumOp5D(TestSumOp): +# def init_input(self): +# self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype) - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.inputs = { - 'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64") - } - self.attrs = {'dim': [2, 4]} - self.outputs = { - 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) - } +# def init_attrs(self): +# self.attrs = {'dim': [0]} - def test_check_output(self): - self.check_output(check_pir=True) +# class TestSumOp6D(TestSumOp): +# def init_input(self): +# self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype) -class TestMin8DOp(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" +# def init_attrs(self): +# self.attrs = {'dim': [0]} - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.inputs = { - 'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64") - } - self.attrs = {'dim': [2, 3, 4]} - self.outputs = { - 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) - } - def test_check_output(self): - self.check_output(check_pir=True) +# class TestSumOp8D(TestSumOp): +# def init_input(self): +# self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype) +# def init_attrs(self): +# self.attrs = {'dim': (0, 3)} -@skip_check_grad_ci( - reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework." 
-) -@unittest.skipIf( - paddle.is_compiled_with_rocm(), "ROCm doesn't have FP16 reduce_min kernel" -) -class TestMinFP16Op(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" +# def test_check_output(self): +# self.check_output(check_pir=True) - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.public_python_api = paddle.min - self.init_dtype() - if self.dtype == np.uint16: - x = np.random.random((5, 6, 10)).astype(np.float32) - self.inputs = {'X': convert_float_to_uint16(x)} - else: - x = np.random.random((5, 6, 10)).astype(self.dtype) - self.inputs = {'X': x} - self.attrs = {'dim': [2], 'keep_dim': True} - out = x.min(axis=tuple(self.attrs['dim']), keepdims=True) - if self.dtype == np.uint16: - self.outputs = {'Out': convert_float_to_uint16(out)} - else: - self.outputs = {'Out': out} - - def init_dtype(self): - self.dtype = np.float16 +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_pir=True) - def test_check_output(self): - self.check_output(check_pir=True) +# class TestSumOp_withInt(TestSumOp): +# def init_input(self): +# # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format +# # Precision limitations on integer values between 0 and 2048 can be exactly represented +# self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype) -@unittest.skipIf( - not core.is_compiled_with_cuda() - or paddle.is_compiled_with_rocm() - or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA or not support the bfloat16", -) -class TestMinBF16Op(TestMinFP16Op): - def init_dtype(self): - self.dtype = np.uint16 +# def init_attrs(self): +# self.attrs = {'dim': (0, 1)} - def test_check_output(self): - self.check_output_with_place(core.CUDAPlace(0), check_pir=True) +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def calc_gradient(self): +# x = self.inputs["X"] +# grad = np.ones(x.shape, dtype=x.dtype) +# return (grad,) + +# def test_check_grad(self): +# self.check_grad( +# ['X'], +# 'Out', +# user_defined_grads=self.calc_gradient(), +# check_prim=True, +# check_prim_pir=True, +# check_pir=True, +# ) + + +# class TestSumOp3Dim(TestSumOp): +# def init_input(self): +# self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype) + +# def init_attrs(self): +# self.attrs = {'dim': (0, 1, 2)} + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def calc_gradient(self): +# x = self.inputs["X"] +# grad = np.ones(x.shape, dtype=x.dtype) +# return (grad,) + +# def test_check_grad(self): +# self.check_grad( +# ['X'], +# 'Out', +# user_defined_grads=self.calc_gradient(), +# check_prim=True, +# check_prim_pir=True, +# check_pir=True, +# ) + + +# def create_test_fp16_class(parent): +# @unittest.skipIf( +# not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +# ) +# class TestSumOpFp16(parent): +# def init_dtype(self): +# self.dtype = np.float16 + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def test_check_grad(self): +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# check_prim_pir=True, +# check_pir=True, +# ) + + +# create_test_fp16_class(TestSumOp) +# create_test_fp16_class(TestSumOp_ZeroDim) +# create_test_fp16_class(TestSumOp5D) +# create_test_fp16_class(TestSumOp6D) +# create_test_fp16_class(TestSumOp8D) +# create_test_fp16_class(TestSumOp_withInt) +# create_test_fp16_class(TestSumOp3Dim) + + +# def create_test_bf16_class(parent): +# @unittest.skipIf( +# not 
core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(), +# "core is not compiled with CUDA", +# ) +# class TestSumOpBf16(parent): +# def setUp(self): +# self.inputs = {'X': convert_float_to_uint16(self.x)} +# self.outputs = {'Out': convert_float_to_uint16(self.out)} +# self.enable_cinn = False + +# def init_dtype(self): +# self.dtype = np.uint16 + +# def test_check_output(self): +# place = core.CUDAPlace(0) +# self.check_output_with_place(place, check_pir=True) + +# def test_check_grad(self): +# place = core.CUDAPlace(0) +# self.check_grad_with_place( +# place, +# ['X'], +# 'Out', +# user_defined_grads=self.gradient, +# check_prim=True, +# check_prim_pir=True, +# check_pir=True, +# ) + +# def calc_gradient(self): +# x = self.x +# grad = np.ones(x.shape, dtype=x.dtype) +# return [grad] + + +# create_test_bf16_class(TestSumOp) +# create_test_bf16_class(TestSumOp_ZeroDim) +# create_test_bf16_class(TestSumOp5D) +# create_test_bf16_class(TestSumOp6D) +# create_test_bf16_class(TestSumOp8D) +# create_test_bf16_class(TestSumOp_withInt) +# create_test_bf16_class(TestSumOp3Dim) + + +# @skip_check_grad_ci( +# reason="reduce_max is discontinuous non-derivable function," +# " its gradient check is not supported by unittest framework." +# ) +# class TestMaxOp(OpTest): +# """Remove Max with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_max" +# self.prim_op_type = "prim" +# self.python_api = paddle.max +# self.public_python_api = paddle.max +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [-1]} +# self.outputs = { +# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def test_check_grad(self): +# # only composite op support gradient check of reduce_max +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# only_check_prim=True, +# check_pir=True, +# ) + + +# class TestMaxOp_ZeroDim(OpTest): +# """Remove Max with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_max" +# self.prim_op_type = "prim" +# self.python_api = paddle.max +# self.public_python_api = paddle.max +# self.if_enable_cinn() +# self.init_inputs_and_outputs() + +# def if_enable_cinn(self): +# self.enable_cinn = False + +# def init_inputs_and_outputs(self): +# self.inputs = {'X': np.random.random([]).astype("float64")} +# self.attrs = {'dim': []} +# self.outputs = { +# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def test_check_grad(self): +# # only composite op support gradient check of reduce_max +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# only_check_prim=True, +# check_pir=True, +# ) + + +# class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim): +# def init_inputs_and_outputs(self): +# self.inputs = {'X': np.random.random([5]).astype("float64")} +# self.attrs = {'dim': [0]} +# self.outputs = {'Out': self.inputs['X'].max(axis=(0,))} + + +# class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1): +# def init_inputs_and_outputs(self): +# self.inputs = {'X': np.random.random([5, 20]).astype("float64")} +# self.attrs = {'dim': [0, 1]} +# self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))} + + +# class TestMaxFP32Op(OpTest): +# """Remove Max with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_max" +# 
self.prim_op_type = "prim" +# self.python_api = paddle.max +# self.public_python_api = paddle.max +# self.init_dtype() +# self.if_enable_cinn() +# if self.dtype == np.uint16: +# x = np.random.random((5, 6, 10)).astype(np.float32) +# self.inputs = {'X': convert_float_to_uint16(x)} +# else: +# x = np.random.random((5, 6, 10)).astype(self.dtype) +# self.inputs = {'X': x} +# self.attrs = {'dim': [-1], 'keep_dim': True} +# out = x.max(axis=tuple(self.attrs['dim']), keepdims=True) +# if self.dtype == np.uint16: +# self.outputs = {'Out': convert_float_to_uint16(out)} +# else: +# self.outputs = {'Out': out} + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def test_check_grad(self): +# # only composite op support gradient check of reduce_max +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# only_check_prim=True, +# check_pir=True, +# ) + +# def init_dtype(self): +# self.dtype = np.float32 + + +# class TestMaxFP16Op(TestMaxFP32Op): +# def init_dtype(self): +# self.dtype = np.float16 + + +# @unittest.skipIf( +# not core.is_compiled_with_cuda() +# or paddle.is_compiled_with_rocm() +# or not core.is_bfloat16_supported(core.CUDAPlace(0)), +# "core is not compiled with CUDA or not support the bfloat16", +# ) +# class TestMaxBF16Op(TestMaxFP32Op): +# def init_dtype(self): +# self.dtype = np.uint16 + +# def if_enable_cinn(self): +# self.enable_cinn = False + +# def test_check_output(self): +# self.check_output_with_place(core.CUDAPlace(0), check_pir=True) + +# def test_check_grad(self): +# # only composite op support gradient check of reduce_max +# self.check_grad_with_place( +# core.CUDAPlace(0), +# ['X'], +# 'Out', +# check_prim=True, +# only_check_prim=True, +# check_pir=True, +# ) + + +# @skip_check_grad_ci( +# reason="reduce_min is discontinuous non-derivable function," +# " its gradient check is not supported by unittest framework." 
+# ) +# class TestMinOp(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [2]} +# self.outputs = { +# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestMinOp_ZeroDim(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.inputs = {'X': np.random.random([]).astype("float64")} +# self.attrs = {'dim': []} +# self.outputs = { +# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestMin6DOp(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.inputs = { +# 'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64") +# } +# self.attrs = {'dim': [2, 4]} +# self.outputs = { +# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestMin8DOp(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.inputs = { +# 'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64") +# } +# self.attrs = {'dim': [2, 3, 4]} +# self.outputs = { +# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# @skip_check_grad_ci( +# reason="reduce_min is discontinuous non-derivable function," +# " its gradient check is not supported by unittest framework." 
+# ) +# @unittest.skipIf( +# paddle.is_compiled_with_rocm(), "ROCm doesn't have FP16 reduce_min kernel" +# ) +# class TestMinFP16Op(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.public_python_api = paddle.min +# self.init_dtype() +# if self.dtype == np.uint16: +# x = np.random.random((5, 6, 10)).astype(np.float32) +# self.inputs = {'X': convert_float_to_uint16(x)} +# else: +# x = np.random.random((5, 6, 10)).astype(self.dtype) +# self.inputs = {'X': x} +# self.attrs = {'dim': [2], 'keep_dim': True} +# out = x.min(axis=tuple(self.attrs['dim']), keepdims=True) +# if self.dtype == np.uint16: +# self.outputs = {'Out': convert_float_to_uint16(out)} +# else: +# self.outputs = {'Out': out} + +# def init_dtype(self): +# self.dtype = np.float16 + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# @unittest.skipIf( +# not core.is_compiled_with_cuda() +# or paddle.is_compiled_with_rocm() +# or not core.is_bfloat16_supported(core.CUDAPlace(0)), +# "core is not compiled with CUDA or not support the bfloat16", +# ) +# class TestMinBF16Op(TestMinFP16Op): +# def init_dtype(self): +# self.dtype = np.uint16 + +# def test_check_output(self): +# self.check_output_with_place(core.CUDAPlace(0), check_pir=True) def raw_reduce_prod(x, dim=[0], keep_dim=False): @@ -554,7 +555,8 @@ def setUp(self): self.if_enable_cinn() def init_inputs_and_outputs(self): - self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)} + x = np.random.random((5, 6, 10)).astype(self.data_type) + self.inputs = {'X': x} self.outputs = {'Out': self.inputs['X'].prod(axis=0)} def init_data_type(self): @@ -832,1140 +834,1140 @@ def test_check_grad(self): ) -def reduce_all_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): - return paddle.all(x, axis, keepdim, name) - - -class TestAllOp(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = reduce_all_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.outputs = {'Out': self.inputs['X'].all()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAllFloatOp(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = reduce_all_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} - self.outputs = {'Out': self.inputs['X'].all()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAllIntOp(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = reduce_all_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} - self.outputs = {'Out': self.inputs['X'].all()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAllOp_ZeroDim(OpTest): - def setUp(self): - self.python_api = paddle.all - self.op_type = "reduce_all" - self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} - self.outputs = {'Out': self.inputs['X'].all()} - self.attrs = {'dim': []} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAll8DOp(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = paddle.all - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (2, 3, 
4)} - self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - - def test_check_output(self): - self.check_output(check_pir=True) - +# def reduce_all_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): +# return paddle.all(x, axis, keepdim, name) + + +# class TestAllOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = reduce_all_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.outputs = {'Out': self.inputs['X'].all()} +# self.attrs = {'reduce_all': True} -class TestAllOpWithDim(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = paddle.all - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': (1,)} - self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAll8DOpWithDim(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = paddle.all - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (1, 3, 4)} - self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAllOpWithKeepDim(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = paddle.all - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': [1], 'keep_dim': True} - self.outputs = { - 'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1) - } - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAll8DOpWithKeepDim(OpTest): - def setUp(self): - self.op_type = "reduce_all" - self.python_api = paddle.all - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (5,), 'keep_dim': True} - self.outputs = { - 'Out': np.expand_dims( - self.inputs['X'].all(axis=self.attrs['dim']), axis=5 - ) - } - - def test_check_output(self): - self.check_output(check_pir=True) - - -class TestAllOpError(unittest.TestCase): - @test_with_pir_api - def test_errors(self): - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): - # The input type of reduce_all_op must be Variable. 
- input1 = 12 - self.assertRaises(TypeError, paddle.all, input1) - - -def reduce_any_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): - return paddle.any(x, axis, keepdim, name) - - -class TestAnyOp(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = reduce_any_wrapper - self.public_python_api = reduce_any_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.outputs = {'Out': self.inputs['X'].any()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyFloatOp(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = reduce_any_wrapper - self.public_python_api = reduce_any_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} - self.outputs = {'Out': self.inputs['X'].any()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyIntOp(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = reduce_any_wrapper - self.public_python_api = reduce_any_wrapper - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} - self.outputs = {'Out': self.inputs['X'].any()} - self.attrs = {'reduce_all': True} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyOp_ZeroDim(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} - self.outputs = {'Out': self.inputs['X'].any()} - self.attrs = {'dim': []} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAny8DOp(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (3, 5, 4)} - self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyOpWithDim(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': [1]} - self.outputs = {'Out': self.inputs['X'].any(axis=1)} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAny8DOpWithDim(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (3, 6)} - self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyOpWithKeepDim(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = {'X': 
np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': (1,), 'keep_dim': True} - self.outputs = { - 'Out': np.expand_dims( - self.inputs['X'].any(axis=self.attrs['dim']), axis=1 - ) - } - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAny8DOpWithKeepDim(OpTest): - def setUp(self): - self.op_type = "reduce_any" - self.prim_op_type = "comp" - self.python_api = paddle.any - self.public_python_api = paddle.any - self.inputs = { - 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( - "bool" - ) - } - self.attrs = {'dim': (1,), 'keep_dim': True} - self.outputs = { - 'Out': np.expand_dims( - self.inputs['X'].any(axis=self.attrs['dim']), axis=1 - ) - } - - def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) - - -class TestAnyOpError(unittest.TestCase): - @test_with_pir_api - def test_errors(self): - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): - # The input type of reduce_any_op must be Variable. - input1 = 12 - self.assertRaises(TypeError, paddle.any, input1) - - -class Test1DReduce(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random(120).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum(axis=0)} - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceSum_ZeroDim(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random(()).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum(axis=0)} - self.if_enable_cinn() - - -class Test2DReduce0(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [0]} - self.inputs = {'X': np.random.random((20, 10)).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum(axis=0)} - self.if_enable_cinn() - - -class Test2DReduce1(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [1]} - self.inputs = {'X': np.random.random((20, 10)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - self.if_enable_cinn() - - -class Test3DReduce0(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [1]} - self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - self.if_enable_cinn() - - -class Test3DReduce1(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [2]} - self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - self.if_enable_cinn() - - -class Test3DReduce2(Test1DReduce): - def 
setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [-2]} - self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - self.if_enable_cinn() - - -class Test3DReduce3(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.attrs = {'dim': [1, 2]} - self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - self.if_enable_cinn() - - -def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False): - if paddle.in_dynamic_mode(): - return paddle._C_ops.sum(x, axis, dtype, keepdim) - else: - if in_pir_mode(): - return paddle._pir_ops.sum(x, axis, dtype, keepdim) - - -class Test8DReduce0(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper2 - self.attrs = {'dim': (4, 2, 3)} - self.inputs = { - 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") - } - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out') - - -class TestKeepDimReduce(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [1], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] - ) - } - self.if_enable_cinn() - - -class TestKeepDimReduceForEager(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper2 - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [1], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] - ) - } - - def test_check_grad(self): - self.check_grad(['X'], 'Out') - - -class TestKeepDim8DReduce(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper2 - self.inputs = { - 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") - } - self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] - ) - } - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out') - - -@skip_check_grad_ci( - reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework." 
-) -class TestReduceMaxOpMultiAxises(OpTest): - """Remove Max with subgradient from gradient check to confirm the success of CI.""" - - def setUp(self): - self.op_type = "reduce_max" - self.prim_op_type = "prim" - self.python_api = paddle.max - self.public_python_api = paddle.max - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [-2, -1]} - self.outputs = { - 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) - } - - def test_check_output(self): - self.check_output(check_pir=True) - - def test_check_grad(self): - # only composite op support gradient check of reduce_max - self.check_grad( - ['X'], - 'Out', - check_prim=True, - only_check_prim=True, - check_pir=True, - ) - - -@skip_check_grad_ci( - reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework." -) -class TestReduceMinOpMultiAxises(OpTest): - """Remove Min with subgradient from gradient check to confirm the success of CI.""" - - def setUp(self): - self.op_type = "reduce_min" - self.python_api = paddle.min - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [1, 2]} - self.outputs = { - 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) - } - - def test_check_output(self): - self.check_output() - - -class TestKeepDimReduceSumMultiAxises(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [-2, -1], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=True - ) - } - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestKeepDimReduceSumMultiAxisesForEager(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper2 - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': [-2, -1], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=True - ) - } - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out') - - -class TestReduceSumWithDimOne(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} - self.attrs = {'dim': [1, 2], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=True - ) - } - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceSumWithDimOneForEager(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper2 - self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} - self.attrs = {'dim': [1, 2], 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=True - ) - } - self.enable_cinn = True - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - 
self.check_grad(['X'], 'Out') - - -class TestReduceSumWithNumelOne(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((100, 1)).astype("float64")} - self.attrs = {'dim': [1], 'keep_dim': False} - self.outputs = { - 'Out': self.inputs['X'].sum( - axis=tuple(self.attrs['dim']), keepdims=False - ) - } - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=False) - - -def reduce_sum_wrapper( - x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None -): - return paddle.sum(x, axis, out_dtype, keepdim, name) - - -class TestReduceAll(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper - self.public_python_api = reduce_sum_wrapper - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} - self.attrs = {'reduce_all': True, 'keep_dim': False} - self.outputs = {'Out': self.inputs['X'].sum()} - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceAllFp32(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper - self.public_python_api = reduce_sum_wrapper - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")} - self.attrs = {'reduce_all': True, 'keep_dim': False} - self.outputs = {'Out': self.inputs['X'].sum()} - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class Test1DReduceWithAxes1(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random(100).astype("float64")} - self.attrs = {'dim': [0], 'keep_dim': False} - self.outputs = {'Out': self.inputs['X'].sum(axis=0)} - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -def reduce_sum_wrapper_fp64( - x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None -): - return paddle.sum(x, axis, 'float64', keepdim, name) - - -class TestReduceWithDtype(OpTest): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = reduce_sum_wrapper_fp64 - self.public_python_api = reduce_sum_wrapper_fp64 - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum().astype('float64')} - self.attrs = {'reduce_all': True} - self.attrs.update( - { - 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), - } - ) - self.if_enable_cinn() - - def if_enable_cinn(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceWithDtype1(TestReduceWithDtype): - def setUp(self): - self.op_type = "reduce_sum" - self.python_api = paddle.sum - self.public_python_api = 
paddle.sum - self.prim_op_type = "prim" - self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum(axis=1)} - self.attrs = {'dim': [1]} - self.attrs.update( - { - 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), - } - ) - # cinn op_mapper not support in_dtype/out_dtype attr - self.enable_cinn = False - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceWithDtype2(TestReduceWithDtype): - def setUp(self): - self.op_type = "reduce_sum" - self.prim_op_type = "prim" - self.python_api = paddle.sum - self.public_python_api = paddle.sum - self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} - self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)} - self.attrs = {'dim': [1], 'keep_dim': True} - self.attrs.update( - { - 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), - } - ) - # cinn op_mapper not support in_dtype/out_dtype attr - self.enable_cinn = False - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_prim=True) - - -class TestReduceSumOpError(unittest.TestCase): - def test_errors1(self): - with static_guard(): - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): - # The input type of reduce_sum_op must be Variable. - x1 = base.create_lod_tensor( - np.array([[-1]]), [[1]], base.CPUPlace() - ) - self.assertRaises(TypeError, paddle.sum, x1) - # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. - - -class API_TestSumOp(unittest.TestCase): - def run_static( - self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None - ): - if np_axis is None: - np_axis = attr_axis - - places = [base.CPUPlace()] - if core.is_compiled_with_cuda(): - places.append(base.CUDAPlace(0)) - for place in places: - with base.program_guard(base.Program(), base.Program()): - data = paddle.static.data("data", shape=shape, dtype=x_dtype) - result_sum = paddle.sum( - x=data, axis=attr_axis, dtype=attr_dtype - ) - - exe = base.Executor(place) - input_data = np.random.rand(*shape).astype(x_dtype) - (res,) = exe.run( - feed={"data": input_data}, fetch_list=[result_sum] - ) - - np.testing.assert_allclose( - res, - np.sum(input_data.astype(attr_dtype), axis=np_axis), - rtol=1e-05, - ) - - @test_with_pir_api - def test_static(self): - shape = [10, 10] - axis = 1 - - self.run_static(shape, "bool", axis, attr_dtype=None) - self.run_static(shape, "bool", axis, attr_dtype="int32") - self.run_static(shape, "bool", axis, attr_dtype="int64") - self.run_static(shape, "bool", axis, attr_dtype="float16") - - self.run_static(shape, "int32", axis, attr_dtype=None) - self.run_static(shape, "int32", axis, attr_dtype="int32") - self.run_static(shape, "int32", axis, attr_dtype="int64") - self.run_static(shape, "int32", axis, attr_dtype="float64") - - self.run_static(shape, "int64", axis, attr_dtype=None) - self.run_static(shape, "int64", axis, attr_dtype="int64") - self.run_static(shape, "int64", axis, attr_dtype="int32") - - self.run_static(shape, "float32", axis, attr_dtype=None) - self.run_static(shape, "float32", axis, attr_dtype="float32") - self.run_static(shape, "float32", axis, attr_dtype="float64") - self.run_static(shape, "float32", axis, attr_dtype="int64") - - 
self.run_static(shape, "float64", axis, attr_dtype=None) - self.run_static(shape, "float64", axis, attr_dtype="float32") - self.run_static(shape, "float64", axis, attr_dtype="float64") - - shape = [5, 5, 5] - self.run_static(shape, "int32", (0, 1), attr_dtype="int32") - self.run_static( - shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2) - ) - - def test_dygraph(self): - np_x = np.random.random([2, 3, 4]).astype('int32') - with base.dygraph.guard(): - x = paddle.to_tensor(np_x) - out0 = paddle.sum(x).numpy() - out1 = paddle.sum(x, axis=0).numpy() - out2 = paddle.sum(x, axis=(0, 1)).numpy() - out3 = paddle.sum(x, axis=(0, 1, 2)).numpy() - - self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all()) - self.assertTrue((out1 == np.sum(np_x, axis=0)).all()) - self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all()) - self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all()) - - -class TestAllAPI(unittest.TestCase): - def setUp(self): - np.random.seed(123) - paddle.enable_static() - self.places = [base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(base.CUDAPlace(0)) - - def check_static_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") - result = paddle.all(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.all(input_np)).all()) - - def check_static_float_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data( - name="input", shape=[4, 4], dtype="float" - ) - result = paddle.all(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("float") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.all(input_np)).all()) - - def check_static_int_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data(name="input", shape=[4, 4], dtype="int") - result = paddle.all(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("int") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.all(input_np)).all()) - - @test_with_pir_api - def test_static(self): - for place in self.places: - self.check_static_result(place=place) - self.check_static_float_result(place=place) - self.check_static_int_result(place=place) - - def test_dygraph(self): - paddle.disable_static() - for place in self.places: - with base.dygraph.guard(place): - np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) - x = paddle.assign(np_x) - x = paddle.cast(x, 'bool') - - out1 = paddle.all(x) - np_out1 = out1.numpy() - expect_res1 = np.all(np_x) - self.assertTrue((np_out1 == expect_res1).all()) - - out2 = paddle.all(x, axis=0) - np_out2 = out2.numpy() - expect_res2 = np.all(np_x, axis=0) - self.assertTrue((np_out2 == expect_res2).all()) - - out3 = paddle.all(x, axis=-1) - np_out3 = out3.numpy() - expect_res3 = np.all(np_x, axis=-1) - self.assertTrue((np_out3 == expect_res3).all()) - - out4 = paddle.all(x, axis=1, keepdim=True) - np_out4 = out4.numpy() - expect_res4 = np.all(np_x, 
axis=1, keepdims=True) - self.assertTrue((np_out4 == expect_res4).all()) - - x = paddle.cast(x, 'float') - out5 = paddle.all(x) - np_out5 = out5.numpy() - expect_res5 = np.all(np_x) - self.assertTrue((np_out5 == expect_res5).all()) - - x = paddle.cast(x, 'int') - out6 = paddle.all(x) - np_out6 = out6.numpy() - expect_res6 = np.all(np_x) - self.assertTrue((np_out6 == expect_res6).all()) - - paddle.enable_static() - - -class TestAnyAPI(unittest.TestCase): - def setUp(self): - np.random.seed(123) - paddle.enable_static() - self.places = [base.CPUPlace()] - if core.is_compiled_with_cuda(): - self.places.append(base.CUDAPlace(0)) - - def check_static_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") - result = paddle.any(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.any(input_np)).all()) - - def check_static_float_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data( - name="input", shape=[4, 4], dtype="float" - ) - result = paddle.any(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("float") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.any(input_np)).all()) - - def check_static_int_result(self, place): - main = paddle.static.Program() - startup = paddle.static.Program() - with base.program_guard(main, startup): - input = paddle.static.data(name="input", shape=[4, 4], dtype="int") - result = paddle.any(x=input) - input_np = np.random.randint(0, 2, [4, 4]).astype("int") - - exe = base.Executor(place) - fetches = exe.run( - main, - feed={"input": input_np}, - fetch_list=[result], - ) - self.assertTrue((fetches[0] == np.any(input_np)).all()) - - @test_with_pir_api - def test_static(self): - for place in self.places: - self.check_static_result(place=place) - self.check_static_float_result(place=place) - self.check_static_int_result(place=place) - - def test_dygraph(self): - paddle.disable_static() - for place in self.places: - with base.dygraph.guard(place): - np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) - x = paddle.assign(np_x) - x = paddle.cast(x, 'bool') - - out1 = paddle.any(x) - np_out1 = out1.numpy() - expect_res1 = np.any(np_x) - self.assertTrue((np_out1 == expect_res1).all()) - - out2 = paddle.any(x, axis=0) - np_out2 = out2.numpy() - expect_res2 = np.any(np_x, axis=0) - self.assertTrue((np_out2 == expect_res2).all()) - - out3 = paddle.any(x, axis=-1) - np_out3 = out3.numpy() - expect_res3 = np.any(np_x, axis=-1) - self.assertTrue((np_out3 == expect_res3).all()) - - out4 = paddle.any(x, axis=1, keepdim=True) - np_out4 = out4.numpy() - expect_res4 = np.any(np_x, axis=1, keepdims=True) - self.assertTrue((np_out4 == expect_res4).all()) - - np_x = np.random.randint(0, 2, (12, 10)).astype(np.float32) - x = paddle.assign(np_x) - x = paddle.cast(x, 'float32') - - out5 = paddle.any(x) - np_out5 = out5.numpy() - expect_res5 = np.any(np_x) - self.assertTrue((np_out5 == expect_res5).all()) - - x = paddle.cast(x, 'int') - out6 = paddle.any(x) - np_out6 = out6.numpy() - expect_res6 = np.any(np_x) - self.assertTrue((np_out6 == expect_res6).all()) - 
- paddle.enable_static() - - -class TestAllZeroError(unittest.TestCase): - def test_errors(self): - with paddle.base.dygraph.guard(): - - def test_0_size(): - array = np.array([], dtype=np.float32) - x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool') - paddle.all(x, axis=1) - - self.assertRaises(ValueError, test_0_size) +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAllFloatOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = reduce_all_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} +# self.outputs = {'Out': self.inputs['X'].all()} +# self.attrs = {'reduce_all': True} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAllIntOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = reduce_all_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} +# self.outputs = {'Out': self.inputs['X'].all()} +# self.attrs = {'reduce_all': True} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAllOp_ZeroDim(OpTest): +# def setUp(self): +# self.python_api = paddle.all +# self.op_type = "reduce_all" +# self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} +# self.outputs = {'Out': self.inputs['X'].all()} +# self.attrs = {'dim': []} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAll8DOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = paddle.all +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (2, 3, 4)} +# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAllOpWithDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = paddle.all +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.attrs = {'dim': (1,)} +# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAll8DOpWithDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = paddle.all +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (1, 3, 4)} +# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAllOpWithKeepDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = paddle.all +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.attrs = {'dim': [1], 'keep_dim': True} +# self.outputs = { +# 'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class TestAll8DOpWithKeepDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_all" +# self.python_api = paddle.all +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (5,), 'keep_dim': True} +# self.outputs = { +# 'Out': np.expand_dims( +# self.inputs['X'].all(axis=self.attrs['dim']), axis=5 +# ) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + + +# class 
TestAllOpError(unittest.TestCase): +# @test_with_pir_api +# def test_errors(self): +# with paddle.static.program_guard( +# paddle.static.Program(), paddle.static.Program() +# ): +# # The input type of reduce_all_op must be Variable. +# input1 = 12 +# self.assertRaises(TypeError, paddle.all, input1) + + +# def reduce_any_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): +# return paddle.any(x, axis, keepdim, name) + + +# class TestAnyOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = reduce_any_wrapper +# self.public_python_api = reduce_any_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.outputs = {'Out': self.inputs['X'].any()} +# self.attrs = {'reduce_all': True} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyFloatOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = reduce_any_wrapper +# self.public_python_api = reduce_any_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} +# self.outputs = {'Out': self.inputs['X'].any()} +# self.attrs = {'reduce_all': True} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyIntOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = reduce_any_wrapper +# self.public_python_api = reduce_any_wrapper +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} +# self.outputs = {'Out': self.inputs['X'].any()} +# self.attrs = {'reduce_all': True} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyOp_ZeroDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} +# self.outputs = {'Out': self.inputs['X'].any()} +# self.attrs = {'dim': []} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAny8DOp(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (3, 5, 4)} +# self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyOpWithDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.attrs = {'dim': [1]} +# self.outputs = {'Out': self.inputs['X'].any(axis=1)} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAny8DOpWithDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (3, 6)} +# self.outputs = {'Out': 
self.inputs['X'].any(axis=self.attrs['dim'])} + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyOpWithKeepDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} +# self.attrs = {'dim': (1,), 'keep_dim': True} +# self.outputs = { +# 'Out': np.expand_dims( +# self.inputs['X'].any(axis=self.attrs['dim']), axis=1 +# ) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAny8DOpWithKeepDim(OpTest): +# def setUp(self): +# self.op_type = "reduce_any" +# self.prim_op_type = "comp" +# self.python_api = paddle.any +# self.public_python_api = paddle.any +# self.inputs = { +# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( +# "bool" +# ) +# } +# self.attrs = {'dim': (1,), 'keep_dim': True} +# self.outputs = { +# 'Out': np.expand_dims( +# self.inputs['X'].any(axis=self.attrs['dim']), axis=1 +# ) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True, check_prim_pir=True) + + +# class TestAnyOpError(unittest.TestCase): +# @test_with_pir_api +# def test_errors(self): +# with paddle.static.program_guard( +# paddle.static.Program(), paddle.static.Program() +# ): +# # The input type of reduce_any_op must be Variable. +# input1 = 12 +# self.assertRaises(TypeError, paddle.any, input1) + + +# class Test1DReduce(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random(120).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceSum_ZeroDim(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random(()).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} +# self.if_enable_cinn() + + +# class Test2DReduce0(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [0]} +# self.inputs = {'X': np.random.random((20, 10)).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} +# self.if_enable_cinn() + + +# class Test2DReduce1(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [1]} +# self.inputs = {'X': np.random.random((20, 10)).astype("float64")} +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } +# self.if_enable_cinn() + + +# class Test3DReduce0(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [1]} +# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } +# 
self.if_enable_cinn() + + +# class Test3DReduce1(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [2]} +# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } +# self.if_enable_cinn() + + +# class Test3DReduce2(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [-2]} +# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } +# self.if_enable_cinn() + + +# class Test3DReduce3(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.attrs = {'dim': [1, 2]} +# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } +# self.if_enable_cinn() + + +# def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False): +# if paddle.in_dynamic_mode(): +# return paddle._C_ops.sum(x, axis, dtype, keepdim) +# else: +# if in_pir_mode(): +# return paddle._pir_ops.sum(x, axis, dtype, keepdim) + + +# class Test8DReduce0(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper2 +# self.attrs = {'dim': (4, 2, 3)} +# self.inputs = { +# 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") +# } +# self.outputs = { +# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out') + + +# class TestKeepDimReduce(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [1], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] +# ) +# } +# self.if_enable_cinn() + + +# class TestKeepDimReduceForEager(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper2 +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [1], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] +# ) +# } + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out') + + +# class TestKeepDim8DReduce(Test1DReduce): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper2 +# self.inputs = { +# 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") +# } +# self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] +# ) +# } + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out') + + +# @skip_check_grad_ci( +# reason="reduce_max is discontinuous non-derivable function," +# " its gradient check is not supported by 
unittest framework." +# ) +# class TestReduceMaxOpMultiAxises(OpTest): +# """Remove Max with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_max" +# self.prim_op_type = "prim" +# self.python_api = paddle.max +# self.public_python_api = paddle.max +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [-2, -1]} +# self.outputs = { +# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output(check_pir=True) + +# def test_check_grad(self): +# # only composite op support gradient check of reduce_max +# self.check_grad( +# ['X'], +# 'Out', +# check_prim=True, +# only_check_prim=True, +# check_pir=True, +# ) + + +# @skip_check_grad_ci( +# reason="reduce_min is discontinuous non-derivable function," +# " its gradient check is not supported by unittest framework." +# ) +# class TestReduceMinOpMultiAxises(OpTest): +# """Remove Min with subgradient from gradient check to confirm the success of CI.""" + +# def setUp(self): +# self.op_type = "reduce_min" +# self.python_api = paddle.min +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [1, 2]} +# self.outputs = { +# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) +# } + +# def test_check_output(self): +# self.check_output() + + +# class TestKeepDimReduceSumMultiAxises(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [-2, -1], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=True +# ) +# } +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestKeepDimReduceSumMultiAxisesForEager(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper2 +# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} +# self.attrs = {'dim': [-2, -1], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=True +# ) +# } + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out') + + +# class TestReduceSumWithDimOne(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} +# self.attrs = {'dim': [1, 2], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=True +# ) +# } +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceSumWithDimOneForEager(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper2 +# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} +# self.attrs = {'dim': [1, 2], 'keep_dim': True} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=True 
+# ) +# } +# self.enable_cinn = True + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out') + + +# class TestReduceSumWithNumelOne(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((100, 1)).astype("float64")} +# self.attrs = {'dim': [1], 'keep_dim': False} +# self.outputs = { +# 'Out': self.inputs['X'].sum( +# axis=tuple(self.attrs['dim']), keepdims=False +# ) +# } +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=False) + + +# def reduce_sum_wrapper( +# x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None +# ): +# return paddle.sum(x, axis, out_dtype, keepdim, name) + + +# class TestReduceAll(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper +# self.public_python_api = reduce_sum_wrapper +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} +# self.attrs = {'reduce_all': True, 'keep_dim': False} +# self.outputs = {'Out': self.inputs['X'].sum()} +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceAllFp32(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper +# self.public_python_api = reduce_sum_wrapper +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")} +# self.attrs = {'reduce_all': True, 'keep_dim': False} +# self.outputs = {'Out': self.inputs['X'].sum()} +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class Test1DReduceWithAxes1(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random(100).astype("float64")} +# self.attrs = {'dim': [0], 'keep_dim': False} +# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# def reduce_sum_wrapper_fp64( +# x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None +# ): +# return paddle.sum(x, axis, 'float64', keepdim, name) + + +# class TestReduceWithDtype(OpTest): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = reduce_sum_wrapper_fp64 +# self.public_python_api = reduce_sum_wrapper_fp64 +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum().astype('float64')} +# self.attrs = {'reduce_all': True} +# self.attrs.update( +# { +# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), +# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), +# } +# ) +# self.if_enable_cinn() + +# def if_enable_cinn(self): +# pass + +# def test_check_output(self): +# self.check_output() + +# 
def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceWithDtype1(TestReduceWithDtype): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.prim_op_type = "prim" +# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum(axis=1)} +# self.attrs = {'dim': [1]} +# self.attrs.update( +# { +# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), +# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), +# } +# ) +# # cinn op_mapper not support in_dtype/out_dtype attr +# self.enable_cinn = False + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceWithDtype2(TestReduceWithDtype): +# def setUp(self): +# self.op_type = "reduce_sum" +# self.prim_op_type = "prim" +# self.python_api = paddle.sum +# self.public_python_api = paddle.sum +# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} +# self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)} +# self.attrs = {'dim': [1], 'keep_dim': True} +# self.attrs.update( +# { +# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), +# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), +# } +# ) +# # cinn op_mapper not support in_dtype/out_dtype attr +# self.enable_cinn = False + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad(self): +# self.check_grad(['X'], 'Out', check_prim=True) + + +# class TestReduceSumOpError(unittest.TestCase): +# def test_errors1(self): +# with static_guard(): +# with paddle.static.program_guard( +# paddle.static.Program(), paddle.static.Program() +# ): +# # The input type of reduce_sum_op must be Variable. +# x1 = base.create_lod_tensor( +# np.array([[-1]]), [[1]], base.CPUPlace() +# ) +# self.assertRaises(TypeError, paddle.sum, x1) +# # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. 
+ + +# class API_TestSumOp(unittest.TestCase): +# def run_static( +# self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None +# ): +# if np_axis is None: +# np_axis = attr_axis + +# places = [base.CPUPlace()] +# if core.is_compiled_with_cuda(): +# places.append(base.CUDAPlace(0)) +# for place in places: +# with base.program_guard(base.Program(), base.Program()): +# data = paddle.static.data("data", shape=shape, dtype=x_dtype) +# result_sum = paddle.sum( +# x=data, axis=attr_axis, dtype=attr_dtype +# ) + +# exe = base.Executor(place) +# input_data = np.random.rand(*shape).astype(x_dtype) +# (res,) = exe.run( +# feed={"data": input_data}, fetch_list=[result_sum] +# ) + +# np.testing.assert_allclose( +# res, +# np.sum(input_data.astype(attr_dtype), axis=np_axis), +# rtol=1e-05, +# ) + +# @test_with_pir_api +# def test_static(self): +# shape = [10, 10] +# axis = 1 + +# self.run_static(shape, "bool", axis, attr_dtype=None) +# self.run_static(shape, "bool", axis, attr_dtype="int32") +# self.run_static(shape, "bool", axis, attr_dtype="int64") +# self.run_static(shape, "bool", axis, attr_dtype="float16") + +# self.run_static(shape, "int32", axis, attr_dtype=None) +# self.run_static(shape, "int32", axis, attr_dtype="int32") +# self.run_static(shape, "int32", axis, attr_dtype="int64") +# self.run_static(shape, "int32", axis, attr_dtype="float64") + +# self.run_static(shape, "int64", axis, attr_dtype=None) +# self.run_static(shape, "int64", axis, attr_dtype="int64") +# self.run_static(shape, "int64", axis, attr_dtype="int32") + +# self.run_static(shape, "float32", axis, attr_dtype=None) +# self.run_static(shape, "float32", axis, attr_dtype="float32") +# self.run_static(shape, "float32", axis, attr_dtype="float64") +# self.run_static(shape, "float32", axis, attr_dtype="int64") + +# self.run_static(shape, "float64", axis, attr_dtype=None) +# self.run_static(shape, "float64", axis, attr_dtype="float32") +# self.run_static(shape, "float64", axis, attr_dtype="float64") + +# shape = [5, 5, 5] +# self.run_static(shape, "int32", (0, 1), attr_dtype="int32") +# self.run_static( +# shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2) +# ) + +# def test_dygraph(self): +# np_x = np.random.random([2, 3, 4]).astype('int32') +# with base.dygraph.guard(): +# x = paddle.to_tensor(np_x) +# out0 = paddle.sum(x).numpy() +# out1 = paddle.sum(x, axis=0).numpy() +# out2 = paddle.sum(x, axis=(0, 1)).numpy() +# out3 = paddle.sum(x, axis=(0, 1, 2)).numpy() + +# self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all()) +# self.assertTrue((out1 == np.sum(np_x, axis=0)).all()) +# self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all()) +# self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all()) + + +# class TestAllAPI(unittest.TestCase): +# def setUp(self): +# np.random.seed(123) +# paddle.enable_static() +# self.places = [base.CPUPlace()] +# if core.is_compiled_with_cuda(): +# self.places.append(base.CUDAPlace(0)) + +# def check_static_result(self, place): +# main = paddle.static.Program() +# startup = paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") +# result = paddle.all(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("bool") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# self.assertTrue((fetches[0] == np.all(input_np)).all()) + +# def check_static_float_result(self, place): +# main = paddle.static.Program() +# startup = 
paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data( +# name="input", shape=[4, 4], dtype="float" +# ) +# result = paddle.all(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("float") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# self.assertTrue((fetches[0] == np.all(input_np)).all()) + +# def check_static_int_result(self, place): +# main = paddle.static.Program() +# startup = paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data(name="input", shape=[4, 4], dtype="int") +# result = paddle.all(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("int") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# self.assertTrue((fetches[0] == np.all(input_np)).all()) + +# @test_with_pir_api +# def test_static(self): +# for place in self.places: +# self.check_static_result(place=place) +# self.check_static_float_result(place=place) +# self.check_static_int_result(place=place) + +# def test_dygraph(self): +# paddle.disable_static() +# for place in self.places: +# with base.dygraph.guard(place): +# np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) +# x = paddle.assign(np_x) +# x = paddle.cast(x, 'bool') + +# out1 = paddle.all(x) +# np_out1 = out1.numpy() +# expect_res1 = np.all(np_x) +# self.assertTrue((np_out1 == expect_res1).all()) + +# out2 = paddle.all(x, axis=0) +# np_out2 = out2.numpy() +# expect_res2 = np.all(np_x, axis=0) +# self.assertTrue((np_out2 == expect_res2).all()) + +# out3 = paddle.all(x, axis=-1) +# np_out3 = out3.numpy() +# expect_res3 = np.all(np_x, axis=-1) +# self.assertTrue((np_out3 == expect_res3).all()) + +# out4 = paddle.all(x, axis=1, keepdim=True) +# np_out4 = out4.numpy() +# expect_res4 = np.all(np_x, axis=1, keepdims=True) +# self.assertTrue((np_out4 == expect_res4).all()) + +# x = paddle.cast(x, 'float') +# out5 = paddle.all(x) +# np_out5 = out5.numpy() +# expect_res5 = np.all(np_x) +# self.assertTrue((np_out5 == expect_res5).all()) + +# x = paddle.cast(x, 'int') +# out6 = paddle.all(x) +# np_out6 = out6.numpy() +# expect_res6 = np.all(np_x) +# self.assertTrue((np_out6 == expect_res6).all()) + +# paddle.enable_static() + + +# class TestAnyAPI(unittest.TestCase): +# def setUp(self): +# np.random.seed(123) +# paddle.enable_static() +# self.places = [base.CPUPlace()] +# if core.is_compiled_with_cuda(): +# self.places.append(base.CUDAPlace(0)) + +# def check_static_result(self, place): +# main = paddle.static.Program() +# startup = paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") +# result = paddle.any(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("bool") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# self.assertTrue((fetches[0] == np.any(input_np)).all()) + +# def check_static_float_result(self, place): +# main = paddle.static.Program() +# startup = paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data( +# name="input", shape=[4, 4], dtype="float" +# ) +# result = paddle.any(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("float") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# 
self.assertTrue((fetches[0] == np.any(input_np)).all()) + +# def check_static_int_result(self, place): +# main = paddle.static.Program() +# startup = paddle.static.Program() +# with base.program_guard(main, startup): +# input = paddle.static.data(name="input", shape=[4, 4], dtype="int") +# result = paddle.any(x=input) +# input_np = np.random.randint(0, 2, [4, 4]).astype("int") + +# exe = base.Executor(place) +# fetches = exe.run( +# main, +# feed={"input": input_np}, +# fetch_list=[result], +# ) +# self.assertTrue((fetches[0] == np.any(input_np)).all()) + +# @test_with_pir_api +# def test_static(self): +# for place in self.places: +# self.check_static_result(place=place) +# self.check_static_float_result(place=place) +# self.check_static_int_result(place=place) + +# def test_dygraph(self): +# paddle.disable_static() +# for place in self.places: +# with base.dygraph.guard(place): +# np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) +# x = paddle.assign(np_x) +# x = paddle.cast(x, 'bool') + +# out1 = paddle.any(x) +# np_out1 = out1.numpy() +# expect_res1 = np.any(np_x) +# self.assertTrue((np_out1 == expect_res1).all()) + +# out2 = paddle.any(x, axis=0) +# np_out2 = out2.numpy() +# expect_res2 = np.any(np_x, axis=0) +# self.assertTrue((np_out2 == expect_res2).all()) + +# out3 = paddle.any(x, axis=-1) +# np_out3 = out3.numpy() +# expect_res3 = np.any(np_x, axis=-1) +# self.assertTrue((np_out3 == expect_res3).all()) + +# out4 = paddle.any(x, axis=1, keepdim=True) +# np_out4 = out4.numpy() +# expect_res4 = np.any(np_x, axis=1, keepdims=True) +# self.assertTrue((np_out4 == expect_res4).all()) + +# np_x = np.random.randint(0, 2, (12, 10)).astype(np.float32) +# x = paddle.assign(np_x) +# x = paddle.cast(x, 'float32') + +# out5 = paddle.any(x) +# np_out5 = out5.numpy() +# expect_res5 = np.any(np_x) +# self.assertTrue((np_out5 == expect_res5).all()) + +# x = paddle.cast(x, 'int') +# out6 = paddle.any(x) +# np_out6 = out6.numpy() +# expect_res6 = np.any(np_x) +# self.assertTrue((np_out6 == expect_res6).all()) + +# paddle.enable_static() + + +# class TestAllZeroError(unittest.TestCase): +# def test_errors(self): +# with paddle.base.dygraph.guard(): + +# def test_0_size(): +# array = np.array([], dtype=np.float32) +# x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool') +# paddle.all(x, axis=1) + +# self.assertRaises(ValueError, test_0_size) if __name__ == '__main__': diff --git a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py index 9e39e609a9823..a3f7a8ccb40aa 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_cumprod_grad.py @@ -97,5 +97,51 @@ def desired(primal, dim): core.set_prim_eager_enabled(False) +@param.parameterized_class( + ('primal', 'dtype'), + [ + ( + np.random.uniform(1, 5, ()), + np.float32, + ), + ], +) +class TestCumprodGradComp0D(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.primal = cls.primal.astype(cls.dtype) + + def test_cumprod_grad_comp_0d(self): + def actual(primal, dim): + paddle.disable_static() + core.set_prim_eager_enabled(True) + x = paddle.to_tensor(primal, dtype='float32', stop_gradient=False) + x.stop_gradient = False + y = paddle.cumprod(x, dim=dim) + x_cotangent = paddle.grad( + y, x, create_graph=True, retain_graph=True + ) + return x_cotangent[0] + + def desired(primal, dim): + paddle.disable_static() + core.set_prim_eager_enabled(False) + x = paddle.to_tensor(primal, dtype='float32', 
stop_gradient=False) + x.stop_gradient = False + y = paddle.cumprod(x, dim=dim) + x_cotangent = paddle.grad( + y, x, create_graph=False, retain_graph=True + ) + return x_cotangent[0] + + np.testing.assert_allclose( + actual=actual(self.primal, 0), + desired=desired(self.primal, 0), + rtol=1e-6, + atol=0, + ) + core.set_prim_eager_enabled(False) + + if __name__ == '__main__': unittest.main() diff --git a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py index ab13f19ddc87e..aacc812a9829a 100644 --- a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py +++ b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py @@ -83,7 +83,7 @@ def train(self, use_prim, use_cinn): return res - def test_tanh_grad_comp(self): + def test_cumprod_grad_comp(self): paddle.enable_static() def actual(primal, cotangent, dim): @@ -142,5 +142,85 @@ def desired(primal, cotangent, dim): paddle.disable_static() +@param.parameterized_class( + ('primal', 'cotangent', 'dtype'), + [ + ( + np.random.uniform(1, 5, ()), + np.random.uniform(1, 5, ()), + np.float32, + ) + ], +) +class TestCumprodGradComp0D(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.primal = cls.primal.astype(cls.dtype) + cls.cotangent = cls.cotangent.astype(cls.dtype) + cls.zero_nums = [0, 1, 10, int(np.prod(cls.primal.shape))] + + def train(self, use_prim, use_cinn): + paddle.seed(2022) + self.x = paddle.randn([2, 4]) + self.x.stop_gradient = False + net = PrimeNet() + core._set_prim_backward_enabled(use_prim) + net = apply_to_static(net, use_cinn) + out = net(self.x) + res = paddle.autograd.grad(out, [self.x]) + + return res + + def test_cumprod_grad_comp_0d(self): + paddle.enable_static() + + def actual(primal, cotangent, dim): + core._set_prim_backward_enabled(True) + mp, sp = paddle.static.Program(), paddle.static.Program() + with paddle.static.program_guard(mp, sp): + x = paddle.static.data('primal', primal.shape, primal.dtype) + x.stop_gradient = False + v = paddle.static.data( + 'cotangent', cotangent.shape, cotangent.dtype + ) + y = paddle.cumprod(x, dim) + x_cotangent = paddle.static.gradients(y, x, v) + exe = paddle.static.Executor() + exe.run(sp) + return exe.run( + program=mp, + feed={'primal': primal, 'cotangent': cotangent}, + fetch_list=[x_cotangent[0]], + )[0] + + def desired(primal, cotangent, dim): + core._set_prim_backward_enabled(False) + mp, sp = paddle.static.Program(), paddle.static.Program() + with paddle.static.program_guard(mp, sp): + x = paddle.static.data('primal', primal.shape, primal.dtype) + x.stop_gradient = False + v = paddle.static.data( + 'cotangent', cotangent.shape, cotangent.dtype + ) + y = paddle.cumprod(x, dim) + x_cotangent = paddle.static.gradients(y, x, v) + exe = paddle.static.Executor() + exe.run(sp) + return exe.run( + program=mp, + feed={'primal': primal, 'cotangent': cotangent}, + fetch_list=[x_cotangent[0]], + )[0] + + np.testing.assert_allclose( + actual=actual(self.primal, self.cotangent, 0), + desired=desired(self.primal, self.cotangent, 0), + rtol=1e-6, + atol=0, + ) + core._set_prim_backward_enabled(False) + paddle.disable_static() + + if __name__ == '__main__': unittest.main() From 3c8b7cfcae7f3353ac0b8bb544e5021aa9300e03 Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Tue, 21 May 2024 14:57:22 +0000 Subject: [PATCH 06/10] remove cout --- paddle/phi/kernels/cpu/cumprod_kernel.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/phi/kernels/cpu/cumprod_kernel.cc 
b/paddle/phi/kernels/cpu/cumprod_kernel.cc index e3661ceefd14f..f39bddbb443ba 100644 --- a/paddle/phi/kernels/cpu/cumprod_kernel.cc +++ b/paddle/phi/kernels/cpu/cumprod_kernel.cc @@ -34,8 +34,7 @@ void CumprodKernel(const Context& dev_ctx, auto* x_data = x->data(); auto* out_data = dev_ctx.template Alloc(out); DDim shape = x->dims(); - std::cout << "x_data addr: " << x_data << "\n"; - std::cout << "out_data addr: " << out_data << "\n"; + size_t outer_dim = 1; size_t mid_dim = 1; size_t inner_dim = 1; From 300c63e0a4f7dd0f0e1652f143006c75f060a592 Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Tue, 21 May 2024 15:00:22 +0000 Subject: [PATCH 07/10] update --- test/deprecated/legacy_test/test_reduce_op.py | 3236 ++++++++--------- 1 file changed, 1617 insertions(+), 1619 deletions(-) diff --git a/test/deprecated/legacy_test/test_reduce_op.py b/test/deprecated/legacy_test/test_reduce_op.py index e899a864246e7..ce74b1423eab4 100644 --- a/test/deprecated/legacy_test/test_reduce_op.py +++ b/test/deprecated/legacy_test/test_reduce_op.py @@ -12,532 +12,531 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import unittest import numpy as np - -sys.path.append("../../legacy_test") -from op_test import OpTest, convert_float_to_uint16 +from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci +from utils import static_guard import paddle +from paddle import base from paddle.base import core +from paddle.base.framework import convert_np_dtype_to_dtype_, in_pir_mode +from paddle.pir_utils import test_with_pir_api + + +class TestSumOp(OpTest): + def setUp(self): + self.init_dtype() + self.init_input() + self.init_attrs() + self.calc_output() + + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.op_type = "reduce_sum" + self.prim_op_type = "prim" + self.inputs = {'X': self.x} + self.outputs = {'Out': self.out} + self.if_enable_cinn() + + def init_dtype(self): + self.dtype = np.float64 + + def init_input(self): + self.x = np.random.random((5, 6, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': [0]} + + def if_enable_cinn(self): + pass + + def calc_output(self): + self.out = self.x.sum(axis=tuple(self.attrs['dim'])) + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + check_prim=True, + check_pir=True, + check_prim_pir=True, + ) + + +class TestComplexSumOP(TestSumOp): + def init_dtype(self): + self.dtype = np.complex128 + + def init_input(self): + self.x = np.random.random((3, 4)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': [0]} + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=False) + + +class TestSumOp_ZeroDim(TestSumOp): + def init_attrs(self): + self.attrs = {'dim': []} + + def init_input(self): + self.x = np.random.random([]).astype(self.dtype) + + def calc_output(self): + self.out = self.x.sum(axis=None) + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + check_pir=True, + check_prim=True, + check_prim_pir=True, + ) + + +class TestSumOp5D(TestSumOp): + def init_input(self): + self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': [0]} + + +class TestSumOp6D(TestSumOp): + def init_input(self): + self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': [0]} + + +class TestSumOp8D(TestSumOp): + 
def init_input(self): + self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': (0, 3)} + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_pir=True) + + +class TestSumOp_withInt(TestSumOp): + def init_input(self): + # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format + # Precision limitations on integer values between 0 and 2048 can be exactly represented + self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': (0, 1)} + + def test_check_output(self): + self.check_output(check_pir=True) + + def calc_gradient(self): + x = self.inputs["X"] + grad = np.ones(x.shape, dtype=x.dtype) + return (grad,) + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + user_defined_grads=self.calc_gradient(), + check_prim=True, + check_prim_pir=True, + check_pir=True, + ) + + +class TestSumOp3Dim(TestSumOp): + def init_input(self): + self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype) + + def init_attrs(self): + self.attrs = {'dim': (0, 1, 2)} + + def test_check_output(self): + self.check_output(check_pir=True) + + def calc_gradient(self): + x = self.inputs["X"] + grad = np.ones(x.shape, dtype=x.dtype) + return (grad,) + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + user_defined_grads=self.calc_gradient(), + check_prim=True, + check_prim_pir=True, + check_pir=True, + ) + + +def create_test_fp16_class(parent): + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) + class TestSumOpFp16(parent): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + check_prim=True, + check_prim_pir=True, + check_pir=True, + ) + + +create_test_fp16_class(TestSumOp) +create_test_fp16_class(TestSumOp_ZeroDim) +create_test_fp16_class(TestSumOp5D) +create_test_fp16_class(TestSumOp6D) +create_test_fp16_class(TestSumOp8D) +create_test_fp16_class(TestSumOp_withInt) +create_test_fp16_class(TestSumOp3Dim) + + +def create_test_bf16_class(parent): + @unittest.skipIf( + not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(), + "core is not compiled with CUDA", + ) + class TestSumOpBf16(parent): + def setUp(self): + self.inputs = {'X': convert_float_to_uint16(self.x)} + self.outputs = {'Out': convert_float_to_uint16(self.out)} + self.enable_cinn = False + + def init_dtype(self): + self.dtype = np.uint16 + + def test_check_output(self): + place = core.CUDAPlace(0) + self.check_output_with_place(place, check_pir=True) + + def test_check_grad(self): + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + ['X'], + 'Out', + user_defined_grads=self.gradient, + check_prim=True, + check_prim_pir=True, + check_pir=True, + ) + + def calc_gradient(self): + x = self.x + grad = np.ones(x.shape, dtype=x.dtype) + return [grad] + + +create_test_bf16_class(TestSumOp) +create_test_bf16_class(TestSumOp_ZeroDim) +create_test_bf16_class(TestSumOp5D) +create_test_bf16_class(TestSumOp6D) +create_test_bf16_class(TestSumOp8D) +create_test_bf16_class(TestSumOp_withInt) +create_test_bf16_class(TestSumOp3Dim) + + +@skip_check_grad_ci( + reason="reduce_max is discontinuous non-derivable function," + " its gradient check is not supported by unittest framework." 
+) +class TestMaxOp(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_max" + self.prim_op_type = "prim" + self.python_api = paddle.max + self.public_python_api = paddle.max + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-1]} + self.outputs = { + 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + # only composite op support gradient check of reduce_max + self.check_grad( + ['X'], + 'Out', + check_prim=True, + only_check_prim=True, + check_pir=True, + ) + + +class TestMaxOp_ZeroDim(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_max" + self.prim_op_type = "prim" + self.python_api = paddle.max + self.public_python_api = paddle.max + self.if_enable_cinn() + self.init_inputs_and_outputs() + + def if_enable_cinn(self): + self.enable_cinn = False + + def init_inputs_and_outputs(self): + self.inputs = {'X': np.random.random([]).astype("float64")} + self.attrs = {'dim': []} + self.outputs = { + 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + # only composite op support gradient check of reduce_max + self.check_grad( + ['X'], + 'Out', + check_prim=True, + only_check_prim=True, + check_pir=True, + ) + + +class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim): + def init_inputs_and_outputs(self): + self.inputs = {'X': np.random.random([5]).astype("float64")} + self.attrs = {'dim': [0]} + self.outputs = {'Out': self.inputs['X'].max(axis=(0,))} + + +class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1): + def init_inputs_and_outputs(self): + self.inputs = {'X': np.random.random([5, 20]).astype("float64")} + self.attrs = {'dim': [0, 1]} + self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))} + -paddle.set_flags({"FLAGS_pir_apply_inplace_pass": False}) +class TestMaxFP32Op(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" + def setUp(self): + self.op_type = "reduce_max" + self.prim_op_type = "prim" + self.python_api = paddle.max + self.public_python_api = paddle.max + self.init_dtype() + self.if_enable_cinn() + if self.dtype == np.uint16: + x = np.random.random((5, 6, 10)).astype(np.float32) + self.inputs = {'X': convert_float_to_uint16(x)} + else: + x = np.random.random((5, 6, 10)).astype(self.dtype) + self.inputs = {'X': x} + self.attrs = {'dim': [-1], 'keep_dim': True} + out = x.max(axis=tuple(self.attrs['dim']), keepdims=True) + if self.dtype == np.uint16: + self.outputs = {'Out': convert_float_to_uint16(out)} + else: + self.outputs = {'Out': out} -# class TestSumOp(OpTest): -# def setUp(self): -# self.init_dtype() -# self.init_input() -# self.init_attrs() -# self.calc_output() + def if_enable_cinn(self): + pass -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.op_type = "reduce_sum" -# self.prim_op_type = "prim" -# self.inputs = {'X': self.x} -# self.outputs = {'Out': self.out} -# self.if_enable_cinn() + def test_check_output(self): + self.check_output(check_pir=True) -# def init_dtype(self): -# self.dtype = np.float64 + def test_check_grad(self): + # only composite op support gradient check of reduce_max + self.check_grad( + ['X'], + 'Out', + check_prim=True, + only_check_prim=True, + 
check_pir=True, + ) -# def init_input(self): -# self.x = np.random.random((5, 6, 10)).astype(self.dtype) + def init_dtype(self): + self.dtype = np.float32 -# def init_attrs(self): -# self.attrs = {'dim': [0]} -# def if_enable_cinn(self): -# pass +class TestMaxFP16Op(TestMaxFP32Op): + def init_dtype(self): + self.dtype = np.float16 -# def calc_output(self): -# self.out = self.x.sum(axis=tuple(self.attrs['dim'])) -# def test_check_output(self): -# self.check_output(check_pir=True) +@unittest.skipIf( + not core.is_compiled_with_cuda() + or paddle.is_compiled_with_rocm() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support the bfloat16", +) +class TestMaxBF16Op(TestMaxFP32Op): + def init_dtype(self): + self.dtype = np.uint16 -# def test_check_grad(self): -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# check_pir=True, -# check_prim_pir=True, -# ) + def if_enable_cinn(self): + self.enable_cinn = False + def test_check_output(self): + self.check_output_with_place(core.CUDAPlace(0), check_pir=True) -# class TestComplexSumOP(TestSumOp): -# def init_dtype(self): -# self.dtype = np.complex128 + def test_check_grad(self): + # only composite op support gradient check of reduce_max + self.check_grad_with_place( + core.CUDAPlace(0), + ['X'], + 'Out', + check_prim=True, + only_check_prim=True, + check_pir=True, + ) -# def init_input(self): -# self.x = np.random.random((3, 4)).astype(self.dtype) -# def init_attrs(self): -# self.attrs = {'dim': [0]} +@skip_check_grad_ci( + reason="reduce_min is discontinuous non-derivable function," + " its gradient check is not supported by unittest framework." +) +class TestMinOp(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=False) + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [2]} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } + def test_check_output(self): + self.check_output(check_pir=True) -# class TestSumOp_ZeroDim(TestSumOp): -# def init_attrs(self): -# self.attrs = {'dim': []} -# def init_input(self): -# self.x = np.random.random([]).astype(self.dtype) +class TestMinOp_ZeroDim(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" -# def calc_output(self): -# self.out = self.x.sum(axis=None) + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.inputs = {'X': np.random.random([]).astype("float64")} + self.attrs = {'dim': []} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } -# def test_check_grad(self): -# self.check_grad( -# ['X'], -# 'Out', -# check_pir=True, -# check_prim=True, -# check_prim_pir=True, -# ) + def test_check_output(self): + self.check_output(check_pir=True) -# class TestSumOp5D(TestSumOp): -# def init_input(self): -# self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype) +class TestMin6DOp(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" -# def init_attrs(self): -# self.attrs = {'dim': [0]} + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.inputs = { + 'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64") + } + self.attrs = {'dim': [2, 4]} + self.outputs = { + 'Out': 
self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } + def test_check_output(self): + self.check_output(check_pir=True) -# class TestSumOp6D(TestSumOp): -# def init_input(self): -# self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype) -# def init_attrs(self): -# self.attrs = {'dim': [0]} +class TestMin8DOp(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.inputs = { + 'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64") + } + self.attrs = {'dim': [2, 3, 4]} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } -# class TestSumOp8D(TestSumOp): -# def init_input(self): -# self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype) + def test_check_output(self): + self.check_output(check_pir=True) -# def init_attrs(self): -# self.attrs = {'dim': (0, 3)} -# def test_check_output(self): -# self.check_output(check_pir=True) +@skip_check_grad_ci( + reason="reduce_min is discontinuous non-derivable function," + " its gradient check is not supported by unittest framework." +) +@unittest.skipIf( + paddle.is_compiled_with_rocm(), "ROCm doesn't have FP16 reduce_min kernel" +) +class TestMinFP16Op(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_pir=True) + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.public_python_api = paddle.min + self.init_dtype() + if self.dtype == np.uint16: + x = np.random.random((5, 6, 10)).astype(np.float32) + self.inputs = {'X': convert_float_to_uint16(x)} + else: + x = np.random.random((5, 6, 10)).astype(self.dtype) + self.inputs = {'X': x} + self.attrs = {'dim': [2], 'keep_dim': True} + out = x.min(axis=tuple(self.attrs['dim']), keepdims=True) + if self.dtype == np.uint16: + self.outputs = {'Out': convert_float_to_uint16(out)} + else: + self.outputs = {'Out': out} + + def init_dtype(self): + self.dtype = np.float16 + def test_check_output(self): + self.check_output(check_pir=True) -# class TestSumOp_withInt(TestSumOp): -# def init_input(self): -# # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format -# # Precision limitations on integer values between 0 and 2048 can be exactly represented -# self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype) -# def init_attrs(self): -# self.attrs = {'dim': (0, 1)} +@unittest.skipIf( + not core.is_compiled_with_cuda() + or paddle.is_compiled_with_rocm() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support the bfloat16", +) +class TestMinBF16Op(TestMinFP16Op): + def init_dtype(self): + self.dtype = np.uint16 -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def calc_gradient(self): -# x = self.inputs["X"] -# grad = np.ones(x.shape, dtype=x.dtype) -# return (grad,) - -# def test_check_grad(self): -# self.check_grad( -# ['X'], -# 'Out', -# user_defined_grads=self.calc_gradient(), -# check_prim=True, -# check_prim_pir=True, -# check_pir=True, -# ) - - -# class TestSumOp3Dim(TestSumOp): -# def init_input(self): -# self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype) - -# def init_attrs(self): -# self.attrs = {'dim': (0, 1, 2)} - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def calc_gradient(self): -# x = self.inputs["X"] -# grad = np.ones(x.shape, 
dtype=x.dtype) -# return (grad,) - -# def test_check_grad(self): -# self.check_grad( -# ['X'], -# 'Out', -# user_defined_grads=self.calc_gradient(), -# check_prim=True, -# check_prim_pir=True, -# check_pir=True, -# ) - - -# def create_test_fp16_class(parent): -# @unittest.skipIf( -# not core.is_compiled_with_cuda(), "core is not compiled with CUDA" -# ) -# class TestSumOpFp16(parent): -# def init_dtype(self): -# self.dtype = np.float16 - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def test_check_grad(self): -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# check_prim_pir=True, -# check_pir=True, -# ) - - -# create_test_fp16_class(TestSumOp) -# create_test_fp16_class(TestSumOp_ZeroDim) -# create_test_fp16_class(TestSumOp5D) -# create_test_fp16_class(TestSumOp6D) -# create_test_fp16_class(TestSumOp8D) -# create_test_fp16_class(TestSumOp_withInt) -# create_test_fp16_class(TestSumOp3Dim) - - -# def create_test_bf16_class(parent): -# @unittest.skipIf( -# not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(), -# "core is not compiled with CUDA", -# ) -# class TestSumOpBf16(parent): -# def setUp(self): -# self.inputs = {'X': convert_float_to_uint16(self.x)} -# self.outputs = {'Out': convert_float_to_uint16(self.out)} -# self.enable_cinn = False - -# def init_dtype(self): -# self.dtype = np.uint16 - -# def test_check_output(self): -# place = core.CUDAPlace(0) -# self.check_output_with_place(place, check_pir=True) - -# def test_check_grad(self): -# place = core.CUDAPlace(0) -# self.check_grad_with_place( -# place, -# ['X'], -# 'Out', -# user_defined_grads=self.gradient, -# check_prim=True, -# check_prim_pir=True, -# check_pir=True, -# ) - -# def calc_gradient(self): -# x = self.x -# grad = np.ones(x.shape, dtype=x.dtype) -# return [grad] - - -# create_test_bf16_class(TestSumOp) -# create_test_bf16_class(TestSumOp_ZeroDim) -# create_test_bf16_class(TestSumOp5D) -# create_test_bf16_class(TestSumOp6D) -# create_test_bf16_class(TestSumOp8D) -# create_test_bf16_class(TestSumOp_withInt) -# create_test_bf16_class(TestSumOp3Dim) - - -# @skip_check_grad_ci( -# reason="reduce_max is discontinuous non-derivable function," -# " its gradient check is not supported by unittest framework." 
-# ) -# class TestMaxOp(OpTest): -# """Remove Max with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_max" -# self.prim_op_type = "prim" -# self.python_api = paddle.max -# self.public_python_api = paddle.max -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [-1]} -# self.outputs = { -# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def test_check_grad(self): -# # only composite op support gradient check of reduce_max -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# only_check_prim=True, -# check_pir=True, -# ) - - -# class TestMaxOp_ZeroDim(OpTest): -# """Remove Max with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_max" -# self.prim_op_type = "prim" -# self.python_api = paddle.max -# self.public_python_api = paddle.max -# self.if_enable_cinn() -# self.init_inputs_and_outputs() - -# def if_enable_cinn(self): -# self.enable_cinn = False - -# def init_inputs_and_outputs(self): -# self.inputs = {'X': np.random.random([]).astype("float64")} -# self.attrs = {'dim': []} -# self.outputs = { -# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def test_check_grad(self): -# # only composite op support gradient check of reduce_max -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# only_check_prim=True, -# check_pir=True, -# ) - - -# class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim): -# def init_inputs_and_outputs(self): -# self.inputs = {'X': np.random.random([5]).astype("float64")} -# self.attrs = {'dim': [0]} -# self.outputs = {'Out': self.inputs['X'].max(axis=(0,))} - - -# class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1): -# def init_inputs_and_outputs(self): -# self.inputs = {'X': np.random.random([5, 20]).astype("float64")} -# self.attrs = {'dim': [0, 1]} -# self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))} - - -# class TestMaxFP32Op(OpTest): -# """Remove Max with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_max" -# self.prim_op_type = "prim" -# self.python_api = paddle.max -# self.public_python_api = paddle.max -# self.init_dtype() -# self.if_enable_cinn() -# if self.dtype == np.uint16: -# x = np.random.random((5, 6, 10)).astype(np.float32) -# self.inputs = {'X': convert_float_to_uint16(x)} -# else: -# x = np.random.random((5, 6, 10)).astype(self.dtype) -# self.inputs = {'X': x} -# self.attrs = {'dim': [-1], 'keep_dim': True} -# out = x.max(axis=tuple(self.attrs['dim']), keepdims=True) -# if self.dtype == np.uint16: -# self.outputs = {'Out': convert_float_to_uint16(out)} -# else: -# self.outputs = {'Out': out} - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def test_check_grad(self): -# # only composite op support gradient check of reduce_max -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# only_check_prim=True, -# check_pir=True, -# ) - -# def init_dtype(self): -# self.dtype = np.float32 - - -# class TestMaxFP16Op(TestMaxFP32Op): -# def init_dtype(self): -# self.dtype = np.float16 - - -# @unittest.skipIf( -# not core.is_compiled_with_cuda() -# or paddle.is_compiled_with_rocm() -# or not core.is_bfloat16_supported(core.CUDAPlace(0)), -# "core is not compiled with CUDA 
or not support the bfloat16", -# ) -# class TestMaxBF16Op(TestMaxFP32Op): -# def init_dtype(self): -# self.dtype = np.uint16 - -# def if_enable_cinn(self): -# self.enable_cinn = False - -# def test_check_output(self): -# self.check_output_with_place(core.CUDAPlace(0), check_pir=True) - -# def test_check_grad(self): -# # only composite op support gradient check of reduce_max -# self.check_grad_with_place( -# core.CUDAPlace(0), -# ['X'], -# 'Out', -# check_prim=True, -# only_check_prim=True, -# check_pir=True, -# ) - - -# @skip_check_grad_ci( -# reason="reduce_min is discontinuous non-derivable function," -# " its gradient check is not supported by unittest framework." -# ) -# class TestMinOp(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [2]} -# self.outputs = { -# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestMinOp_ZeroDim(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.inputs = {'X': np.random.random([]).astype("float64")} -# self.attrs = {'dim': []} -# self.outputs = { -# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestMin6DOp(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.inputs = { -# 'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64") -# } -# self.attrs = {'dim': [2, 4]} -# self.outputs = { -# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestMin8DOp(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.inputs = { -# 'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64") -# } -# self.attrs = {'dim': [2, 3, 4]} -# self.outputs = { -# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# @skip_check_grad_ci( -# reason="reduce_min is discontinuous non-derivable function," -# " its gradient check is not supported by unittest framework." 
-# ) -# @unittest.skipIf( -# paddle.is_compiled_with_rocm(), "ROCm doesn't have FP16 reduce_min kernel" -# ) -# class TestMinFP16Op(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.public_python_api = paddle.min -# self.init_dtype() -# if self.dtype == np.uint16: -# x = np.random.random((5, 6, 10)).astype(np.float32) -# self.inputs = {'X': convert_float_to_uint16(x)} -# else: -# x = np.random.random((5, 6, 10)).astype(self.dtype) -# self.inputs = {'X': x} -# self.attrs = {'dim': [2], 'keep_dim': True} -# out = x.min(axis=tuple(self.attrs['dim']), keepdims=True) -# if self.dtype == np.uint16: -# self.outputs = {'Out': convert_float_to_uint16(out)} -# else: -# self.outputs = {'Out': out} - -# def init_dtype(self): -# self.dtype = np.float16 - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# @unittest.skipIf( -# not core.is_compiled_with_cuda() -# or paddle.is_compiled_with_rocm() -# or not core.is_bfloat16_supported(core.CUDAPlace(0)), -# "core is not compiled with CUDA or not support the bfloat16", -# ) -# class TestMinBF16Op(TestMinFP16Op): -# def init_dtype(self): -# self.dtype = np.uint16 - -# def test_check_output(self): -# self.check_output_with_place(core.CUDAPlace(0), check_pir=True) + def test_check_output(self): + self.check_output_with_place(core.CUDAPlace(0), check_pir=True) def raw_reduce_prod(x, dim=[0], keep_dim=False): @@ -555,8 +554,7 @@ def setUp(self): self.if_enable_cinn() def init_inputs_and_outputs(self): - x = np.random.random((5, 6, 10)).astype(self.data_type) - self.inputs = {'X': x} + self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)} self.outputs = {'Out': self.inputs['X'].prod(axis=0)} def init_data_type(self): @@ -834,1140 +832,1140 @@ def test_check_grad(self): ) -# def reduce_all_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): -# return paddle.all(x, axis, keepdim, name) - - -# class TestAllOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = reduce_all_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.outputs = {'Out': self.inputs['X'].all()} -# self.attrs = {'reduce_all': True} +def reduce_all_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): + return paddle.all(x, axis, keepdim, name) + + +class TestAllOp(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = reduce_all_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.outputs = {'Out': self.inputs['X'].all()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAllFloatOp(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = reduce_all_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} + self.outputs = {'Out': self.inputs['X'].all()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAllIntOp(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = reduce_all_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} + self.outputs = {'Out': self.inputs['X'].all()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAllOp_ZeroDim(OpTest): + def setUp(self): + 
self.python_api = paddle.all + self.op_type = "reduce_all" + self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} + self.outputs = {'Out': self.inputs['X'].all()} + self.attrs = {'dim': []} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAll8DOp(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = paddle.all + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (2, 3, 4)} + self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output(check_pir=True) + -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllFloatOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = reduce_all_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} -# self.outputs = {'Out': self.inputs['X'].all()} -# self.attrs = {'reduce_all': True} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllIntOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = reduce_all_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} -# self.outputs = {'Out': self.inputs['X'].all()} -# self.attrs = {'reduce_all': True} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllOp_ZeroDim(OpTest): -# def setUp(self): -# self.python_api = paddle.all -# self.op_type = "reduce_all" -# self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} -# self.outputs = {'Out': self.inputs['X'].all()} -# self.attrs = {'dim': []} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAll8DOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = paddle.all -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (2, 3, 4)} -# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllOpWithDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = paddle.all -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.attrs = {'dim': (1,)} -# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAll8DOpWithDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = paddle.all -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (1, 3, 4)} -# self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllOpWithKeepDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = paddle.all -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.attrs = {'dim': [1], 'keep_dim': True} -# self.outputs = { -# 'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAll8DOpWithKeepDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_all" -# self.python_api = paddle.all -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 
5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (5,), 'keep_dim': True} -# self.outputs = { -# 'Out': np.expand_dims( -# self.inputs['X'].all(axis=self.attrs['dim']), axis=5 -# ) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - - -# class TestAllOpError(unittest.TestCase): -# @test_with_pir_api -# def test_errors(self): -# with paddle.static.program_guard( -# paddle.static.Program(), paddle.static.Program() -# ): -# # The input type of reduce_all_op must be Variable. -# input1 = 12 -# self.assertRaises(TypeError, paddle.all, input1) - - -# def reduce_any_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): -# return paddle.any(x, axis, keepdim, name) - - -# class TestAnyOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = reduce_any_wrapper -# self.public_python_api = reduce_any_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.outputs = {'Out': self.inputs['X'].any()} -# self.attrs = {'reduce_all': True} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyFloatOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = reduce_any_wrapper -# self.public_python_api = reduce_any_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} -# self.outputs = {'Out': self.inputs['X'].any()} -# self.attrs = {'reduce_all': True} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyIntOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = reduce_any_wrapper -# self.public_python_api = reduce_any_wrapper -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} -# self.outputs = {'Out': self.inputs['X'].any()} -# self.attrs = {'reduce_all': True} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyOp_ZeroDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} -# self.outputs = {'Out': self.inputs['X'].any()} -# self.attrs = {'dim': []} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAny8DOp(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (3, 5, 4)} -# self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyOpWithDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.attrs = {'dim': [1]} -# self.outputs = {'Out': self.inputs['X'].any(axis=1)} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAny8DOpWithDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# 
self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (3, 6)} -# self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyOpWithKeepDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} -# self.attrs = {'dim': (1,), 'keep_dim': True} -# self.outputs = { -# 'Out': np.expand_dims( -# self.inputs['X'].any(axis=self.attrs['dim']), axis=1 -# ) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAny8DOpWithKeepDim(OpTest): -# def setUp(self): -# self.op_type = "reduce_any" -# self.prim_op_type = "comp" -# self.python_api = paddle.any -# self.public_python_api = paddle.any -# self.inputs = { -# 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( -# "bool" -# ) -# } -# self.attrs = {'dim': (1,), 'keep_dim': True} -# self.outputs = { -# 'Out': np.expand_dims( -# self.inputs['X'].any(axis=self.attrs['dim']), axis=1 -# ) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True, check_prim_pir=True) - - -# class TestAnyOpError(unittest.TestCase): -# @test_with_pir_api -# def test_errors(self): -# with paddle.static.program_guard( -# paddle.static.Program(), paddle.static.Program() -# ): -# # The input type of reduce_any_op must be Variable. -# input1 = 12 -# self.assertRaises(TypeError, paddle.any, input1) - - -# class Test1DReduce(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random(120).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceSum_ZeroDim(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random(()).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} -# self.if_enable_cinn() - - -# class Test2DReduce0(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [0]} -# self.inputs = {'X': np.random.random((20, 10)).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} -# self.if_enable_cinn() - - -# class Test2DReduce1(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [1]} -# self.inputs = {'X': np.random.random((20, 10)).astype("float64")} -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } -# self.if_enable_cinn() - - -# class Test3DReduce0(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# 
self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [1]} -# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } -# self.if_enable_cinn() - - -# class Test3DReduce1(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [2]} -# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } -# self.if_enable_cinn() - - -# class Test3DReduce2(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [-2]} -# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } -# self.if_enable_cinn() - - -# class Test3DReduce3(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.attrs = {'dim': [1, 2]} -# self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } -# self.if_enable_cinn() - - -# def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False): -# if paddle.in_dynamic_mode(): -# return paddle._C_ops.sum(x, axis, dtype, keepdim) -# else: -# if in_pir_mode(): -# return paddle._pir_ops.sum(x, axis, dtype, keepdim) - - -# class Test8DReduce0(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper2 -# self.attrs = {'dim': (4, 2, 3)} -# self.inputs = { -# 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") -# } -# self.outputs = { -# 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out') - - -# class TestKeepDimReduce(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [1], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] -# ) -# } -# self.if_enable_cinn() - - -# class TestKeepDimReduceForEager(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper2 -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [1], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] -# ) -# } - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out') - - -# class TestKeepDim8DReduce(Test1DReduce): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper2 -# self.inputs = { -# 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") -# } -# self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] -# ) -# } - -# 
def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out') - - -# @skip_check_grad_ci( -# reason="reduce_max is discontinuous non-derivable function," -# " its gradient check is not supported by unittest framework." -# ) -# class TestReduceMaxOpMultiAxises(OpTest): -# """Remove Max with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_max" -# self.prim_op_type = "prim" -# self.python_api = paddle.max -# self.public_python_api = paddle.max -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [-2, -1]} -# self.outputs = { -# 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output(check_pir=True) - -# def test_check_grad(self): -# # only composite op support gradient check of reduce_max -# self.check_grad( -# ['X'], -# 'Out', -# check_prim=True, -# only_check_prim=True, -# check_pir=True, -# ) - - -# @skip_check_grad_ci( -# reason="reduce_min is discontinuous non-derivable function," -# " its gradient check is not supported by unittest framework." -# ) -# class TestReduceMinOpMultiAxises(OpTest): -# """Remove Min with subgradient from gradient check to confirm the success of CI.""" - -# def setUp(self): -# self.op_type = "reduce_min" -# self.python_api = paddle.min -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [1, 2]} -# self.outputs = { -# 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) -# } - -# def test_check_output(self): -# self.check_output() - - -# class TestKeepDimReduceSumMultiAxises(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [-2, -1], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=True -# ) -# } -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestKeepDimReduceSumMultiAxisesForEager(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper2 -# self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} -# self.attrs = {'dim': [-2, -1], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=True -# ) -# } - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out') - - -# class TestReduceSumWithDimOne(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} -# self.attrs = {'dim': [1, 2], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=True -# ) -# } -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceSumWithDimOneForEager(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# 
self.python_api = reduce_sum_wrapper2 -# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} -# self.attrs = {'dim': [1, 2], 'keep_dim': True} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=True -# ) -# } -# self.enable_cinn = True - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out') - - -# class TestReduceSumWithNumelOne(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((100, 1)).astype("float64")} -# self.attrs = {'dim': [1], 'keep_dim': False} -# self.outputs = { -# 'Out': self.inputs['X'].sum( -# axis=tuple(self.attrs['dim']), keepdims=False -# ) -# } -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=False) - - -# def reduce_sum_wrapper( -# x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None -# ): -# return paddle.sum(x, axis, out_dtype, keepdim, name) - - -# class TestReduceAll(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper -# self.public_python_api = reduce_sum_wrapper -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} -# self.attrs = {'reduce_all': True, 'keep_dim': False} -# self.outputs = {'Out': self.inputs['X'].sum()} -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceAllFp32(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper -# self.public_python_api = reduce_sum_wrapper -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")} -# self.attrs = {'reduce_all': True, 'keep_dim': False} -# self.outputs = {'Out': self.inputs['X'].sum()} -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class Test1DReduceWithAxes1(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random(100).astype("float64")} -# self.attrs = {'dim': [0], 'keep_dim': False} -# self.outputs = {'Out': self.inputs['X'].sum(axis=0)} -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# def reduce_sum_wrapper_fp64( -# x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None -# ): -# return paddle.sum(x, axis, 'float64', keepdim, name) - - -# class TestReduceWithDtype(OpTest): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = reduce_sum_wrapper_fp64 -# self.public_python_api = reduce_sum_wrapper_fp64 -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum().astype('float64')} -# self.attrs = {'reduce_all': True} -# self.attrs.update( -# 
{ -# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), -# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), -# } -# ) -# self.if_enable_cinn() - -# def if_enable_cinn(self): -# pass - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceWithDtype1(TestReduceWithDtype): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.prim_op_type = "prim" -# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum(axis=1)} -# self.attrs = {'dim': [1]} -# self.attrs.update( -# { -# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), -# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), -# } -# ) -# # cinn op_mapper not support in_dtype/out_dtype attr -# self.enable_cinn = False - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceWithDtype2(TestReduceWithDtype): -# def setUp(self): -# self.op_type = "reduce_sum" -# self.prim_op_type = "prim" -# self.python_api = paddle.sum -# self.public_python_api = paddle.sum -# self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} -# self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)} -# self.attrs = {'dim': [1], 'keep_dim': True} -# self.attrs.update( -# { -# 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), -# 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), -# } -# ) -# # cinn op_mapper not support in_dtype/out_dtype attr -# self.enable_cinn = False - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad(self): -# self.check_grad(['X'], 'Out', check_prim=True) - - -# class TestReduceSumOpError(unittest.TestCase): -# def test_errors1(self): -# with static_guard(): -# with paddle.static.program_guard( -# paddle.static.Program(), paddle.static.Program() -# ): -# # The input type of reduce_sum_op must be Variable. -# x1 = base.create_lod_tensor( -# np.array([[-1]]), [[1]], base.CPUPlace() -# ) -# self.assertRaises(TypeError, paddle.sum, x1) -# # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. 
- - -# class API_TestSumOp(unittest.TestCase): -# def run_static( -# self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None -# ): -# if np_axis is None: -# np_axis = attr_axis - -# places = [base.CPUPlace()] -# if core.is_compiled_with_cuda(): -# places.append(base.CUDAPlace(0)) -# for place in places: -# with base.program_guard(base.Program(), base.Program()): -# data = paddle.static.data("data", shape=shape, dtype=x_dtype) -# result_sum = paddle.sum( -# x=data, axis=attr_axis, dtype=attr_dtype -# ) - -# exe = base.Executor(place) -# input_data = np.random.rand(*shape).astype(x_dtype) -# (res,) = exe.run( -# feed={"data": input_data}, fetch_list=[result_sum] -# ) - -# np.testing.assert_allclose( -# res, -# np.sum(input_data.astype(attr_dtype), axis=np_axis), -# rtol=1e-05, -# ) - -# @test_with_pir_api -# def test_static(self): -# shape = [10, 10] -# axis = 1 - -# self.run_static(shape, "bool", axis, attr_dtype=None) -# self.run_static(shape, "bool", axis, attr_dtype="int32") -# self.run_static(shape, "bool", axis, attr_dtype="int64") -# self.run_static(shape, "bool", axis, attr_dtype="float16") - -# self.run_static(shape, "int32", axis, attr_dtype=None) -# self.run_static(shape, "int32", axis, attr_dtype="int32") -# self.run_static(shape, "int32", axis, attr_dtype="int64") -# self.run_static(shape, "int32", axis, attr_dtype="float64") - -# self.run_static(shape, "int64", axis, attr_dtype=None) -# self.run_static(shape, "int64", axis, attr_dtype="int64") -# self.run_static(shape, "int64", axis, attr_dtype="int32") - -# self.run_static(shape, "float32", axis, attr_dtype=None) -# self.run_static(shape, "float32", axis, attr_dtype="float32") -# self.run_static(shape, "float32", axis, attr_dtype="float64") -# self.run_static(shape, "float32", axis, attr_dtype="int64") - -# self.run_static(shape, "float64", axis, attr_dtype=None) -# self.run_static(shape, "float64", axis, attr_dtype="float32") -# self.run_static(shape, "float64", axis, attr_dtype="float64") - -# shape = [5, 5, 5] -# self.run_static(shape, "int32", (0, 1), attr_dtype="int32") -# self.run_static( -# shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2) -# ) - -# def test_dygraph(self): -# np_x = np.random.random([2, 3, 4]).astype('int32') -# with base.dygraph.guard(): -# x = paddle.to_tensor(np_x) -# out0 = paddle.sum(x).numpy() -# out1 = paddle.sum(x, axis=0).numpy() -# out2 = paddle.sum(x, axis=(0, 1)).numpy() -# out3 = paddle.sum(x, axis=(0, 1, 2)).numpy() - -# self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all()) -# self.assertTrue((out1 == np.sum(np_x, axis=0)).all()) -# self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all()) -# self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all()) - - -# class TestAllAPI(unittest.TestCase): -# def setUp(self): -# np.random.seed(123) -# paddle.enable_static() -# self.places = [base.CPUPlace()] -# if core.is_compiled_with_cuda(): -# self.places.append(base.CUDAPlace(0)) - -# def check_static_result(self, place): -# main = paddle.static.Program() -# startup = paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") -# result = paddle.all(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# self.assertTrue((fetches[0] == np.all(input_np)).all()) - -# def check_static_float_result(self, place): -# main = paddle.static.Program() -# startup = 
paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data( -# name="input", shape=[4, 4], dtype="float" -# ) -# result = paddle.all(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("float") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# self.assertTrue((fetches[0] == np.all(input_np)).all()) - -# def check_static_int_result(self, place): -# main = paddle.static.Program() -# startup = paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data(name="input", shape=[4, 4], dtype="int") -# result = paddle.all(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("int") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# self.assertTrue((fetches[0] == np.all(input_np)).all()) - -# @test_with_pir_api -# def test_static(self): -# for place in self.places: -# self.check_static_result(place=place) -# self.check_static_float_result(place=place) -# self.check_static_int_result(place=place) - -# def test_dygraph(self): -# paddle.disable_static() -# for place in self.places: -# with base.dygraph.guard(place): -# np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) -# x = paddle.assign(np_x) -# x = paddle.cast(x, 'bool') - -# out1 = paddle.all(x) -# np_out1 = out1.numpy() -# expect_res1 = np.all(np_x) -# self.assertTrue((np_out1 == expect_res1).all()) - -# out2 = paddle.all(x, axis=0) -# np_out2 = out2.numpy() -# expect_res2 = np.all(np_x, axis=0) -# self.assertTrue((np_out2 == expect_res2).all()) - -# out3 = paddle.all(x, axis=-1) -# np_out3 = out3.numpy() -# expect_res3 = np.all(np_x, axis=-1) -# self.assertTrue((np_out3 == expect_res3).all()) - -# out4 = paddle.all(x, axis=1, keepdim=True) -# np_out4 = out4.numpy() -# expect_res4 = np.all(np_x, axis=1, keepdims=True) -# self.assertTrue((np_out4 == expect_res4).all()) - -# x = paddle.cast(x, 'float') -# out5 = paddle.all(x) -# np_out5 = out5.numpy() -# expect_res5 = np.all(np_x) -# self.assertTrue((np_out5 == expect_res5).all()) - -# x = paddle.cast(x, 'int') -# out6 = paddle.all(x) -# np_out6 = out6.numpy() -# expect_res6 = np.all(np_x) -# self.assertTrue((np_out6 == expect_res6).all()) - -# paddle.enable_static() - - -# class TestAnyAPI(unittest.TestCase): -# def setUp(self): -# np.random.seed(123) -# paddle.enable_static() -# self.places = [base.CPUPlace()] -# if core.is_compiled_with_cuda(): -# self.places.append(base.CUDAPlace(0)) - -# def check_static_result(self, place): -# main = paddle.static.Program() -# startup = paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") -# result = paddle.any(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# self.assertTrue((fetches[0] == np.any(input_np)).all()) - -# def check_static_float_result(self, place): -# main = paddle.static.Program() -# startup = paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data( -# name="input", shape=[4, 4], dtype="float" -# ) -# result = paddle.any(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("float") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# 
self.assertTrue((fetches[0] == np.any(input_np)).all()) - -# def check_static_int_result(self, place): -# main = paddle.static.Program() -# startup = paddle.static.Program() -# with base.program_guard(main, startup): -# input = paddle.static.data(name="input", shape=[4, 4], dtype="int") -# result = paddle.any(x=input) -# input_np = np.random.randint(0, 2, [4, 4]).astype("int") - -# exe = base.Executor(place) -# fetches = exe.run( -# main, -# feed={"input": input_np}, -# fetch_list=[result], -# ) -# self.assertTrue((fetches[0] == np.any(input_np)).all()) - -# @test_with_pir_api -# def test_static(self): -# for place in self.places: -# self.check_static_result(place=place) -# self.check_static_float_result(place=place) -# self.check_static_int_result(place=place) - -# def test_dygraph(self): -# paddle.disable_static() -# for place in self.places: -# with base.dygraph.guard(place): -# np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) -# x = paddle.assign(np_x) -# x = paddle.cast(x, 'bool') - -# out1 = paddle.any(x) -# np_out1 = out1.numpy() -# expect_res1 = np.any(np_x) -# self.assertTrue((np_out1 == expect_res1).all()) - -# out2 = paddle.any(x, axis=0) -# np_out2 = out2.numpy() -# expect_res2 = np.any(np_x, axis=0) -# self.assertTrue((np_out2 == expect_res2).all()) - -# out3 = paddle.any(x, axis=-1) -# np_out3 = out3.numpy() -# expect_res3 = np.any(np_x, axis=-1) -# self.assertTrue((np_out3 == expect_res3).all()) - -# out4 = paddle.any(x, axis=1, keepdim=True) -# np_out4 = out4.numpy() -# expect_res4 = np.any(np_x, axis=1, keepdims=True) -# self.assertTrue((np_out4 == expect_res4).all()) - -# np_x = np.random.randint(0, 2, (12, 10)).astype(np.float32) -# x = paddle.assign(np_x) -# x = paddle.cast(x, 'float32') - -# out5 = paddle.any(x) -# np_out5 = out5.numpy() -# expect_res5 = np.any(np_x) -# self.assertTrue((np_out5 == expect_res5).all()) - -# x = paddle.cast(x, 'int') -# out6 = paddle.any(x) -# np_out6 = out6.numpy() -# expect_res6 = np.any(np_x) -# self.assertTrue((np_out6 == expect_res6).all()) - -# paddle.enable_static() - - -# class TestAllZeroError(unittest.TestCase): -# def test_errors(self): -# with paddle.base.dygraph.guard(): - -# def test_0_size(): -# array = np.array([], dtype=np.float32) -# x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool') -# paddle.all(x, axis=1) - -# self.assertRaises(ValueError, test_0_size) +class TestAllOpWithDim(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = paddle.all + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.attrs = {'dim': (1,)} + self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAll8DOpWithDim(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = paddle.all + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (1, 3, 4)} + self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAllOpWithKeepDim(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = paddle.all + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.attrs = {'dim': [1], 'keep_dim': True} + self.outputs = { + 'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1) + } + + def test_check_output(self): + self.check_output(check_pir=True) + + +class 
TestAll8DOpWithKeepDim(OpTest): + def setUp(self): + self.op_type = "reduce_all" + self.python_api = paddle.all + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (5,), 'keep_dim': True} + self.outputs = { + 'Out': np.expand_dims( + self.inputs['X'].all(axis=self.attrs['dim']), axis=5 + ) + } + + def test_check_output(self): + self.check_output(check_pir=True) + + +class TestAllOpError(unittest.TestCase): + @test_with_pir_api + def test_errors(self): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + # The input type of reduce_all_op must be Variable. + input1 = 12 + self.assertRaises(TypeError, paddle.all, input1) + + +def reduce_any_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None): + return paddle.any(x, axis, keepdim, name) + + +class TestAnyOp(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = reduce_any_wrapper + self.public_python_api = reduce_any_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.outputs = {'Out': self.inputs['X'].any()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyFloatOp(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = reduce_any_wrapper + self.public_python_api = reduce_any_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("float")} + self.outputs = {'Out': self.inputs['X'].any()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyIntOp(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = reduce_any_wrapper + self.public_python_api = reduce_any_wrapper + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("int")} + self.outputs = {'Out': self.inputs['X'].any()} + self.attrs = {'reduce_all': True} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyOp_ZeroDim(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")} + self.outputs = {'Out': self.inputs['X'].any()} + self.attrs = {'dim': []} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAny8DOp(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (3, 5, 4)} + self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyOpWithDim(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.attrs = {'dim': [1]} + self.outputs = {'Out': self.inputs['X'].any(axis=1)} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class 
TestAny8DOpWithDim(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (3, 6)} + self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyOpWithKeepDim(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} + self.attrs = {'dim': (1,), 'keep_dim': True} + self.outputs = { + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) + } + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAny8DOpWithKeepDim(OpTest): + def setUp(self): + self.op_type = "reduce_any" + self.prim_op_type = "comp" + self.python_api = paddle.any + self.public_python_api = paddle.any + self.inputs = { + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) + } + self.attrs = {'dim': (1,), 'keep_dim': True} + self.outputs = { + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) + } + + def test_check_output(self): + self.check_output(check_pir=True, check_prim_pir=True) + + +class TestAnyOpError(unittest.TestCase): + @test_with_pir_api + def test_errors(self): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + # The input type of reduce_any_op must be Variable. + input1 = 12 + self.assertRaises(TypeError, paddle.any, input1) + + +class Test1DReduce(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random(120).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestReduceSum_ZeroDim(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random(()).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + self.if_enable_cinn() + + +class Test2DReduce0(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.attrs = {'dim': [0]} + self.inputs = {'X': np.random.random((20, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + self.if_enable_cinn() + + +class Test2DReduce1(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.attrs = {'dim': [1]} + self.inputs = {'X': np.random.random((20, 10)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + self.if_enable_cinn() + + +class Test3DReduce0(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + 
self.prim_op_type = "prim" + self.attrs = {'dim': [1]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + self.if_enable_cinn() + + +class Test3DReduce1(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.attrs = {'dim': [2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + self.if_enable_cinn() + + +class Test3DReduce2(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.attrs = {'dim': [-2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + self.if_enable_cinn() + + +class Test3DReduce3(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.attrs = {'dim': [1, 2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + self.if_enable_cinn() + + +def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False): + if paddle.in_dynamic_mode(): + return paddle._C_ops.sum(x, axis, dtype, keepdim) + else: + if in_pir_mode(): + return paddle._pir_ops.sum(x, axis, dtype, keepdim) + + +class Test8DReduce0(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper2 + self.attrs = {'dim': (4, 2, 3)} + self.inputs = { + 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") + } + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestKeepDimReduce(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [1], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) + } + self.if_enable_cinn() + + +class TestKeepDimReduceForEager(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper2 + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [1], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) + } + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestKeepDim8DReduce(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper2 + self.inputs = { + 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") + } + self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +@skip_check_grad_ci( + 
reason="reduce_max is discontinuous non-derivable function," + " its gradient check is not supported by unittest framework." +) +class TestReduceMaxOpMultiAxises(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_max" + self.prim_op_type = "prim" + self.python_api = paddle.max + self.public_python_api = paddle.max + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-2, -1]} + self.outputs = { + 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output(check_pir=True) + + def test_check_grad(self): + # only composite op support gradient check of reduce_max + self.check_grad( + ['X'], + 'Out', + check_prim=True, + only_check_prim=True, + check_pir=True, + ) + + +@skip_check_grad_ci( + reason="reduce_min is discontinuous non-derivable function," + " its gradient check is not supported by unittest framework." +) +class TestReduceMinOpMultiAxises(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_min" + self.python_api = paddle.min + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [1, 2]} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output() + + +class TestKeepDimReduceSumMultiAxises(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-2, -1], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) + } + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestKeepDimReduceSumMultiAxisesForEager(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper2 + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-2, -1], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestReduceSumWithDimOne(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} + self.attrs = {'dim': [1, 2], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) + } + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestReduceSumWithDimOneForEager(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper2 + self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} + self.attrs = {'dim': [1, 2], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) + } + 
self.enable_cinn = True + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestReduceSumWithNumelOne(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((100, 1)).astype("float64")} + self.attrs = {'dim': [1], 'keep_dim': False} + self.outputs = { + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=False + ) + } + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=False) + + +def reduce_sum_wrapper( + x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None +): + return paddle.sum(x, axis, out_dtype, keepdim, name) + + +class TestReduceAll(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper + self.public_python_api = reduce_sum_wrapper + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} + self.attrs = {'reduce_all': True, 'keep_dim': False} + self.outputs = {'Out': self.inputs['X'].sum()} + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestReduceAllFp32(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper + self.public_python_api = reduce_sum_wrapper + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")} + self.attrs = {'reduce_all': True, 'keep_dim': False} + self.outputs = {'Out': self.inputs['X'].sum()} + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class Test1DReduceWithAxes1(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random(100).astype("float64")} + self.attrs = {'dim': [0], 'keep_dim': False} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +def reduce_sum_wrapper_fp64( + x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None +): + return paddle.sum(x, axis, 'float64', keepdim, name) + + +class TestReduceWithDtype(OpTest): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = reduce_sum_wrapper_fp64 + self.public_python_api = reduce_sum_wrapper_fp64 + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum().astype('float64')} + self.attrs = {'reduce_all': True} + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) + self.if_enable_cinn() + + def if_enable_cinn(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class 
TestReduceWithDtype1(TestReduceWithDtype): + def setUp(self): + self.op_type = "reduce_sum" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.prim_op_type = "prim" + self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=1)} + self.attrs = {'dim': [1]} + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) + # cinn op_mapper not support in_dtype/out_dtype attr + self.enable_cinn = False + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestReduceWithDtype2(TestReduceWithDtype): + def setUp(self): + self.op_type = "reduce_sum" + self.prim_op_type = "prim" + self.python_api = paddle.sum + self.public_python_api = paddle.sum + self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)} + self.attrs = {'dim': [1], 'keep_dim': True} + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) + # cinn op_mapper not support in_dtype/out_dtype attr + self.enable_cinn = False + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', check_prim=True) + + +class TestReduceSumOpError(unittest.TestCase): + def test_errors1(self): + with static_guard(): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + # The input type of reduce_sum_op must be Variable. + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() + ) + self.assertRaises(TypeError, paddle.sum, x1) + # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. 
+ + +class API_TestSumOp(unittest.TestCase): + def run_static( + self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None + ): + if np_axis is None: + np_axis = attr_axis + + places = [base.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + for place in places: + with base.program_guard(base.Program(), base.Program()): + data = paddle.static.data("data", shape=shape, dtype=x_dtype) + result_sum = paddle.sum( + x=data, axis=attr_axis, dtype=attr_dtype + ) + + exe = base.Executor(place) + input_data = np.random.rand(*shape).astype(x_dtype) + (res,) = exe.run( + feed={"data": input_data}, fetch_list=[result_sum] + ) + + np.testing.assert_allclose( + res, + np.sum(input_data.astype(attr_dtype), axis=np_axis), + rtol=1e-05, + ) + + @test_with_pir_api + def test_static(self): + shape = [10, 10] + axis = 1 + + self.run_static(shape, "bool", axis, attr_dtype=None) + self.run_static(shape, "bool", axis, attr_dtype="int32") + self.run_static(shape, "bool", axis, attr_dtype="int64") + self.run_static(shape, "bool", axis, attr_dtype="float16") + + self.run_static(shape, "int32", axis, attr_dtype=None) + self.run_static(shape, "int32", axis, attr_dtype="int32") + self.run_static(shape, "int32", axis, attr_dtype="int64") + self.run_static(shape, "int32", axis, attr_dtype="float64") + + self.run_static(shape, "int64", axis, attr_dtype=None) + self.run_static(shape, "int64", axis, attr_dtype="int64") + self.run_static(shape, "int64", axis, attr_dtype="int32") + + self.run_static(shape, "float32", axis, attr_dtype=None) + self.run_static(shape, "float32", axis, attr_dtype="float32") + self.run_static(shape, "float32", axis, attr_dtype="float64") + self.run_static(shape, "float32", axis, attr_dtype="int64") + + self.run_static(shape, "float64", axis, attr_dtype=None) + self.run_static(shape, "float64", axis, attr_dtype="float32") + self.run_static(shape, "float64", axis, attr_dtype="float64") + + shape = [5, 5, 5] + self.run_static(shape, "int32", (0, 1), attr_dtype="int32") + self.run_static( + shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2) + ) + + def test_dygraph(self): + np_x = np.random.random([2, 3, 4]).astype('int32') + with base.dygraph.guard(): + x = paddle.to_tensor(np_x) + out0 = paddle.sum(x).numpy() + out1 = paddle.sum(x, axis=0).numpy() + out2 = paddle.sum(x, axis=(0, 1)).numpy() + out3 = paddle.sum(x, axis=(0, 1, 2)).numpy() + + self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all()) + self.assertTrue((out1 == np.sum(np_x, axis=0)).all()) + self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all()) + self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all()) + + +class TestAllAPI(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.places = [base.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(base.CUDAPlace(0)) + + def check_static_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") + result = paddle.all(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("bool") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.all(input_np)).all()) + + def check_static_float_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = paddle.static.data( 
+ name="input", shape=[4, 4], dtype="float" + ) + result = paddle.all(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("float") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.all(input_np)).all()) + + def check_static_int_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = paddle.static.data(name="input", shape=[4, 4], dtype="int") + result = paddle.all(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("int") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.all(input_np)).all()) + + @test_with_pir_api + def test_static(self): + for place in self.places: + self.check_static_result(place=place) + self.check_static_float_result(place=place) + self.check_static_int_result(place=place) + + def test_dygraph(self): + paddle.disable_static() + for place in self.places: + with base.dygraph.guard(place): + np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) + x = paddle.assign(np_x) + x = paddle.cast(x, 'bool') + + out1 = paddle.all(x) + np_out1 = out1.numpy() + expect_res1 = np.all(np_x) + self.assertTrue((np_out1 == expect_res1).all()) + + out2 = paddle.all(x, axis=0) + np_out2 = out2.numpy() + expect_res2 = np.all(np_x, axis=0) + self.assertTrue((np_out2 == expect_res2).all()) + + out3 = paddle.all(x, axis=-1) + np_out3 = out3.numpy() + expect_res3 = np.all(np_x, axis=-1) + self.assertTrue((np_out3 == expect_res3).all()) + + out4 = paddle.all(x, axis=1, keepdim=True) + np_out4 = out4.numpy() + expect_res4 = np.all(np_x, axis=1, keepdims=True) + self.assertTrue((np_out4 == expect_res4).all()) + + x = paddle.cast(x, 'float') + out5 = paddle.all(x) + np_out5 = out5.numpy() + expect_res5 = np.all(np_x) + self.assertTrue((np_out5 == expect_res5).all()) + + x = paddle.cast(x, 'int') + out6 = paddle.all(x) + np_out6 = out6.numpy() + expect_res6 = np.all(np_x) + self.assertTrue((np_out6 == expect_res6).all()) + + paddle.enable_static() + + +class TestAnyAPI(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.places = [base.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(base.CUDAPlace(0)) + + def check_static_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") + result = paddle.any(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("bool") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.any(input_np)).all()) + + def check_static_float_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = paddle.static.data( + name="input", shape=[4, 4], dtype="float" + ) + result = paddle.any(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("float") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.any(input_np)).all()) + + def check_static_int_result(self, place): + main = paddle.static.Program() + startup = paddle.static.Program() + with base.program_guard(main, startup): + input = 
paddle.static.data(name="input", shape=[4, 4], dtype="int") + result = paddle.any(x=input) + input_np = np.random.randint(0, 2, [4, 4]).astype("int") + + exe = base.Executor(place) + fetches = exe.run( + main, + feed={"input": input_np}, + fetch_list=[result], + ) + self.assertTrue((fetches[0] == np.any(input_np)).all()) + + @test_with_pir_api + def test_static(self): + for place in self.places: + self.check_static_result(place=place) + self.check_static_float_result(place=place) + self.check_static_int_result(place=place) + + def test_dygraph(self): + paddle.disable_static() + for place in self.places: + with base.dygraph.guard(place): + np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) + x = paddle.assign(np_x) + x = paddle.cast(x, 'bool') + + out1 = paddle.any(x) + np_out1 = out1.numpy() + expect_res1 = np.any(np_x) + self.assertTrue((np_out1 == expect_res1).all()) + + out2 = paddle.any(x, axis=0) + np_out2 = out2.numpy() + expect_res2 = np.any(np_x, axis=0) + self.assertTrue((np_out2 == expect_res2).all()) + + out3 = paddle.any(x, axis=-1) + np_out3 = out3.numpy() + expect_res3 = np.any(np_x, axis=-1) + self.assertTrue((np_out3 == expect_res3).all()) + + out4 = paddle.any(x, axis=1, keepdim=True) + np_out4 = out4.numpy() + expect_res4 = np.any(np_x, axis=1, keepdims=True) + self.assertTrue((np_out4 == expect_res4).all()) + + np_x = np.random.randint(0, 2, (12, 10)).astype(np.float32) + x = paddle.assign(np_x) + x = paddle.cast(x, 'float32') + + out5 = paddle.any(x) + np_out5 = out5.numpy() + expect_res5 = np.any(np_x) + self.assertTrue((np_out5 == expect_res5).all()) + + x = paddle.cast(x, 'int') + out6 = paddle.any(x) + np_out6 = out6.numpy() + expect_res6 = np.any(np_x) + self.assertTrue((np_out6 == expect_res6).all()) + + paddle.enable_static() + + +class TestAllZeroError(unittest.TestCase): + def test_errors(self): + with paddle.base.dygraph.guard(): + + def test_0_size(): + array = np.array([], dtype=np.float32) + x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool') + paddle.all(x, axis=1) + + self.assertRaises(ValueError, test_0_size) if __name__ == '__main__': From d200f8b5448799ccd582e1582c36f05dcb52aed1 Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Tue, 21 May 2024 15:02:42 +0000 Subject: [PATCH 08/10] update static test --- test/prim/prim/vjp/static/test_comp_cumprod_grad.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py index aacc812a9829a..239df04b9f8b4 100644 --- a/test/prim/prim/vjp/static/test_comp_cumprod_grad.py +++ b/test/prim/prim/vjp/static/test_comp_cumprod_grad.py @@ -157,19 +157,6 @@ class TestCumprodGradComp0D(unittest.TestCase): def setUpClass(cls): cls.primal = cls.primal.astype(cls.dtype) cls.cotangent = cls.cotangent.astype(cls.dtype) - cls.zero_nums = [0, 1, 10, int(np.prod(cls.primal.shape))] - - def train(self, use_prim, use_cinn): - paddle.seed(2022) - self.x = paddle.randn([2, 4]) - self.x.stop_gradient = False - net = PrimeNet() - core._set_prim_backward_enabled(use_prim) - net = apply_to_static(net, use_cinn) - out = net(self.x) - res = paddle.autograd.grad(out, [self.x]) - - return res def test_cumprod_grad_comp_0d(self): paddle.enable_static() From f985123583a7864dfd1fe7923b96c08c6f741b17 Mon Sep 17 00:00:00 2001 From: YibinLiu666 <2632839426@qq.com> Date: Wed, 22 May 2024 13:59:09 +0000 Subject: [PATCH 09/10] update --- .../prim/api/composite_backward/composite_backward_api.h | 5 +---- 
test/prim/prim/vjp/eager/CMakeLists.txt | 2 ++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h index 7d836fa0f8dc6..0465f73a44593 100644 --- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h @@ -1085,12 +1085,9 @@ void cumprod_grad(const Tensor& x, auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); // determine the index of first zero - auto zero_mask_cumsum_inclusive = - cumsum(zero_mask, dim, false, false, reverse); auto zero_mask_cumsum_exclusive = cumsum(zero_mask, dim, false, true, reverse); - auto zero_mask_cumsum = - zero_mask_cumsum_inclusive + zero_mask_cumsum_exclusive; + auto zero_mask_cumsum = scale(zero_mask_cumsum_exclusive, 2) + zero_mask; auto ones_tensor = full(x_dim, 1.0, x.dtype()); auto first_zero_mask = cast(equal(zero_mask_cumsum, ones_tensor), x.dtype()); diff --git a/test/prim/prim/vjp/eager/CMakeLists.txt b/test/prim/prim/vjp/eager/CMakeLists.txt index 863a484c466f1..1de54ecb6e3f4 100644 --- a/test/prim/prim/vjp/eager/CMakeLists.txt +++ b/test/prim/prim/vjp/eager/CMakeLists.txt @@ -8,3 +8,5 @@ set(GC_ENVS FLAGS_eager_delete_tensor_gb=0.0) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS}) endforeach() + +set_tests_properties(test_comp_eager_cumprod_grad PROPERTIES TIMEOUT 120) From 15a1fa21498522b87a00dbce887806c60a7fc6f1 Mon Sep 17 00:00:00 2001 From: YibLiu <68105073+YibinLiu666@users.noreply.github.com> Date: Thu, 23 May 2024 12:15:40 +0800 Subject: [PATCH 10/10] Update details.h --- paddle/fluid/primitive/rule/vjp/details.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/paddle/fluid/primitive/rule/vjp/details.h b/paddle/fluid/primitive/rule/vjp/details.h index 13f3f0e878fe8..acf8ed945914f 100644 --- a/paddle/fluid/primitive/rule/vjp/details.h +++ b/paddle/fluid/primitive/rule/vjp/details.h @@ -73,12 +73,9 @@ void cumprod_grad(const Tensor& x, auto zero_tensor = full(x_dim, 0.0, x.dtype()); auto zero_mask = cast(equal(x, zero_tensor), x.dtype()); // determine the index of first zero - auto zero_mask_cumsum_inclusive = - cumsum(zero_mask, dim, false, false, reverse); auto zero_mask_cumsum_exclusive = cumsum(zero_mask, dim, false, true, reverse); - auto zero_mask_cumsum = - zero_mask_cumsum_inclusive + zero_mask_cumsum_exclusive; + auto zero_mask_cumsum = scale(zero_mask_cumsum_exclusive, 2) + zero_mask; auto ones_tensor = full(x_dim, 1.0, x.dtype()); auto first_zero_mask = cast(equal(zero_mask_cumsum, ones_tensor), x.dtype());
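A minimal standalone NumPy sketch (illustrative values only, not Paddle code) of the identity behind the rewrite above: for a 0/1 zero mask, the inclusive cumulative sum equals the exclusive cumulative sum plus the mask itself, so the earlier "inclusive + exclusive" expression folds into "2 * exclusive + mask", which is what scale(zero_mask_cumsum_exclusive, 2) + zero_mask computes; entries equal to 1 then mark the first zero of x along the scanned dimension.

import numpy as np

x = np.array([2.0, 0.0, 3.0, 0.0, 5.0])        # made-up sample input
zero_mask = (x == 0).astype(np.float64)        # [0, 1, 0, 1, 0]
inclusive = np.cumsum(zero_mask)               # inclusive scan of the mask
exclusive = inclusive - zero_mask              # exclusive scan of the mask
# the removed formulation equals the new one
assert np.allclose(inclusive + exclusive, 2 * exclusive + zero_mask)
first_zero_mask = (2 * exclusive + zero_mask == 1).astype(np.float64)
print(first_zero_mask)                         # [0. 1. 0. 0. 0.] -> only the first zero is flagged

Later zeros yield odd values >= 3 and non-zero positions yield even values, so comparing against 1 isolates exactly the first zero, with one cumsum call instead of two.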