From 72db2a535a19819d7776f37a4c69a65e65ca63b5 Mon Sep 17 00:00:00 2001
From: chenrunzhe
Date: Thu, 4 Jan 2024 07:42:14 +0000
Subject: [PATCH] correct adamw bf16 unit test and the way to get data type

---
 test/xpu/op_test_xpu.py                 | 11 ++++++++++-
 test/xpu/test_adamw_op_xpu.py           | 22 +++++++++++++++++++---
 test/xpu/test_elementwise_sub_op_xpu.py |  7 ++++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/test/xpu/op_test_xpu.py b/test/xpu/op_test_xpu.py
index 6247942b8cde7..0641af10876ff 100644
--- a/test/xpu/op_test_xpu.py
+++ b/test/xpu/op_test_xpu.py
@@ -106,8 +106,13 @@ def check_output_with_place(
             if not core.is_float16_supported(place):
                 return
 
-        if self.dtype == np.float16:
+        if self.dtype == np.uint16:
+            if not core.is_bfloat16_supported(place):
+                return
+
+        if self.dtype == np.float16 or self.dtype == np.uint16:
             atol = 0.1
+
         return super().check_output_with_place(
             place,
             atol,
@@ -183,6 +188,10 @@ def check_grad_with_place(
             if not core.is_float16_supported(place):
                 return
 
+        if self.dtype == np.uint16:
+            if not core.is_bfloat16_supported(place):
+                return
+
         if self.dtype == np.float16 or self.dtype == np.uint16:
             max_relative_error = 0.1
         return super().check_grad_with_place(
diff --git a/test/xpu/test_adamw_op_xpu.py b/test/xpu/test_adamw_op_xpu.py
index 8584360837d79..b9120779c40f6 100644
--- a/test/xpu/test_adamw_op_xpu.py
+++ b/test/xpu/test_adamw_op_xpu.py
@@ -21,6 +21,7 @@
     create_test_class,
     get_xpu_op_support_types,
 )
+from op_test import convert_float_to_uint16
 from op_test_xpu import XPUOpTest
 
 import paddle
@@ -85,8 +86,8 @@ def setUp(self):
             self.op_type = "adamw"
             self.init_shape()
             self.dtype = self.in_type
-            param = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
-            grad = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+            param = np.random.uniform(-1, 1, self.shape)
+            grad = np.random.uniform(-1, 1, self.shape)
             moment1 = np.random.uniform(-1, 1, self.shape).astype("float32")
             # The second moment is positive
             moment2 = np.random.random(self.shape).astype("float32")
@@ -97,7 +98,9 @@ def setUp(self):
             epsilon = 1e-4
             beta1_pow = beta1**10
             beta2_pow = beta2**10
-
+            if self.dtype != np.uint16:
+                param = param.astype(self.dtype)
+                grad = grad.astype(self.dtype)
             self.inputs = {
                 'Param': param,
                 'Grad': grad,
@@ -128,6 +131,15 @@ def setUp(self):
                 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
             }
 
+            if self.dtype == np.uint16:
+                self.inputs['Param'] = convert_float_to_uint16(
+                    self.inputs['Param']
+                )
+                self.inputs['Grad'] = convert_float_to_uint16(
+                    self.inputs['Grad']
+                )
+                self.outputs['ParamOut'] = convert_float_to_uint16(param_out)
+
         def init_shape(self):
             self.shape = [102, 105]
 
@@ -135,6 +147,10 @@ def test_check_output(self):
             paddle.enable_static()
             self.check_output_with_place(place=paddle.XPUPlace(0))
 
+        def infer_dtype_from_inputs_outputs(self, inputs, outputs):
+            self.__class__.dtype = self.dtype
+            self.output_dtype = self.dtype
+
     class TestAdamW2(TestAdamW):
         def init_shape(self):
             self.shape = [
diff --git a/test/xpu/test_elementwise_sub_op_xpu.py b/test/xpu/test_elementwise_sub_op_xpu.py
index 3cb440f05de06..4afa0ba553678 100644
--- a/test/xpu/test_elementwise_sub_op_xpu.py
+++ b/test/xpu/test_elementwise_sub_op_xpu.py
@@ -66,7 +66,8 @@ def init_input_output(self):
             if self.dtype == np.uint16:
                 tmp_x = self.reshape_data(self.x, self.y)
                 tmp_y = self.reshape_data(self.y, self.x)
-                self.outputs = {'Out': tmp_x - tmp_y}
+                tmp_out = tmp_x - tmp_y
+                self.outputs = {'Out': convert_float_to_uint16(tmp_out)}
                 self.x = convert_float_to_uint16(self.x)
                 self.y = convert_float_to_uint16(self.y)
             else:
@@ -74,8 +75,8 @@ def init_input_output(self):
                 tmp_y = self.reshape_data(self.y, self.x).astype(self.dtype)
                 self.outputs = {'Out': tmp_x - tmp_y}
                 self.inputs = {
-                    'X': self.x,
-                    'Y': self.y,
+                    'X': self.x.astype(self.dtype),
+                    'Y': self.y.astype(self.dtype),
                 }
 
         def init_shape(self):
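
Background note: numpy has no native bfloat16 dtype, which is why this patch feeds bf16
data to the OpTest machinery as np.uint16 bit patterns via convert_float_to_uint16. A
minimal sketch of the underlying idea, assuming plain truncation (the function name below
is illustrative, not Paddle's helper, whose exact rounding behavior may differ):

    import numpy as np

    def float_to_bf16_bits(x):
        """Keep the upper 16 bits of each float32 value (the bfloat16 bit pattern)."""
        x = np.asarray(x, dtype=np.float32)
        # Reinterpret the float32 buffer as uint32, then drop the low half.
        return (x.view(np.uint32) >> 16).astype(np.uint16)

    param = np.random.uniform(-1, 1, (102, 105)).astype(np.float32)
    param_bf16 = float_to_bf16_bits(param)  # dtype uint16, as the bf16 tests expect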