From 266b56e7bc08e9ed514ea29d5504e40280053ba7 Mon Sep 17 00:00:00 2001 From: zhenyun-li <1500424927@qq.com> Date: Mon, 6 Mar 2023 05:49:36 +0000 Subject: [PATCH] fill_constant_batch_size_like support bf16 shape support bf16 --- paddle/phi/kernels/full_kernel.cc | 3 +- paddle/phi/kernels/shape_kernel.cc | 3 +- .../test_fill_constant_batch_size_like.py | 44 ++++++++++++++++++- .../fluid/tests/unittests/test_shape_op.py | 26 ++++++++++- 4 files changed, 72 insertions(+), 4 deletions(-) diff --git a/paddle/phi/kernels/full_kernel.cc b/paddle/phi/kernels/full_kernel.cc index 9622bff5c255a..ce898210633b7 100644 --- a/paddle/phi/kernels/full_kernel.cc +++ b/paddle/phi/kernels/full_kernel.cc @@ -59,7 +59,8 @@ PD_REGISTER_KERNEL(full_batch_size_like, int, int64_t, bool, - phi::dtype::float16) { + phi::dtype::float16, + phi::dtype::bfloat16) { kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND); } #endif diff --git a/paddle/phi/kernels/shape_kernel.cc b/paddle/phi/kernels/shape_kernel.cc index 84cd7b43519b0..7d0778d4089a0 100644 --- a/paddle/phi/kernels/shape_kernel.cc +++ b/paddle/phi/kernels/shape_kernel.cc @@ -65,7 +65,8 @@ PD_REGISTER_KERNEL(shape, double, phi::dtype::complex, phi::dtype::complex, - phi::dtype::float16) { + phi::dtype::float16, + phi::dtype::bfloat16) { kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND); kernel->OutputAt(0).SetBackend(phi::Backend::CPU); kernel->OutputAt(0).SetDataType(phi::DataType::INT32); diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py index 68bb8f1cc9146..2b49503c8263a 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py @@ -15,9 +15,10 @@ import unittest import numpy as np -from eager_op_test import OpTest +from eager_op_test import OpTest, convert_float_to_uint16 import paddle +import 
paddle.fluid.core as core from paddle.fluid.framework import convert_np_dtype_to_dtype_ paddle.enable_static() @@ -70,5 +71,46 @@ def test_check_output(self): self.check_output() +@unittest.skipIf( + not core.is_compiled_with_cuda() or not core.supports_bfloat16(), + "core is not compiled with CUDA or place do not support bfloat16", +) +class TestFillConstantBatchSizeLikeBf16(OpTest): + # test bf16 + def setUp(self): + self.op_type = "fill_constant_batch_size_like" + self.python_api = fill_constant_batch_size_like + self.init_data() + + input = np.zeros(self.shape).astype("float32") + input_bf16 = convert_float_to_uint16(input) + out = np.full_like(input, self.value, np.float32) + out_bf16 = convert_float_to_uint16(out) + + self.inputs = {'Input': input_bf16} + self.outputs = {'Out': out_bf16} + self.attrs = { + 'shape': self.shape, + 'dtype': convert_np_dtype_to_dtype_(self.dtype), + 'value': self.value, + 'input_dim_idx': self.input_dim_idx, + 'output_dim_idx': self.output_dim_idx, + 'force_cpu': self.force_cpu, + } + + def init_data(self): + self.shape = [10, 10] + self.dtype = np.uint16 + self.value = 100 + self.input_dim_idx = 0 + self.output_dim_idx = 0 + self.force_cpu = False + + def test_check_output(self): + place = core.CUDAPlace(0) + self.check_output_with_place(place) + + if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py index 8853cb217938b..1c6decce09383 100644 --- a/python/paddle/fluid/tests/unittests/test_shape_op.py +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, convert_float_to_uint16 import paddle from paddle.fluid import core @@ -85,5 +85,29 @@ def test_check_output(self): self.check_with_place(place) +@unittest.skipIf( + not core.is_compiled_with_cuda() or not core.supports_bfloat16(), + 
"core is not compiled with CUDA or place do not support bfloat16", +) +class TestShapeOpBf16(OpTest): + def setUp(self): + self.op_type = "shape" + self.dtype = 'bfloat16' + self.python_api = paddle.shape + self.config() + self.shape = [2, 3] + input = np.zeros(self.shape) + input = convert_float_to_uint16(input.astype('float32')) + self.inputs = {'Input': input} + self.outputs = {'Out': np.array(self.shape)} + + def config(self): + self.shape = [2, 3] + + def test_check_output(self): + place = core.CUDAPlace(0) + self.check_output_with_place(place) + + if __name__ == '__main__': unittest.main()