diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
index 5318cb464001f8..ccdfcd750f091d 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
@@ -50,4 +50,5 @@ PD_REGISTER_KERNEL(divide_raw,
                    ALL_LAYOUT,
                    phi::DivideRawKernel,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    float) {}
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
index 790bd72b240914..2986e555cda705 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
@@ -50,6 +50,7 @@ PD_REGISTER_KERNEL(multiply_raw,
                    ALL_LAYOUT,
                    phi::MultiplyRawKernel,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    float,
                    int,
                    int64_t) {}
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
index 421a30a240a434..7fb4144d7705bc 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
@@ -45,4 +45,5 @@ PD_REGISTER_KERNEL(subtract_raw,
                    phi::SubtractRawKernel,
                    float,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int64_t) {}
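The three registrations above are the functional change: phi::dtype::bfloat16 joins the dtype lists of the raw XPU divide/multiply/subtract kernels. A hedged sketch of what this enables from Python, assuming a Paddle build with XPU support and a device at xpu:0 (shapes are illustrative):

```python
import paddle

paddle.set_device("xpu:0")  # assumption: an XPU device is available
x = paddle.rand([13, 17]).astype("bfloat16")
y = paddle.rand([13, 17]).astype("bfloat16")
# Elementwise div/mul/sub can now dispatch to the bfloat16 XPU kernels.
print((x / y).dtype, (x * y).dtype, (x - y).dtype)
```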
diff --git a/test/xpu/test_elementwise_div_op_xpu.py b/test/xpu/test_elementwise_div_op_xpu.py
index 52e2e62e067d2e..ca190b7eb12307 100644
--- a/test/xpu/test_elementwise_div_op_xpu.py
+++ b/test/xpu/test_elementwise_div_op_xpu.py
@@ -20,7 +20,10 @@
     create_test_class,
     get_xpu_op_support_types,
 )
-from op_test import skip_check_grad_ci
+from op_test import (
+    convert_float_to_uint16,
+    skip_check_grad_ci,
+)
 from op_test_xpu import XPUOpTest

 import paddle
@@ -28,6 +31,8 @@
 paddle.enable_static()

+INT_GROUP = [np.int32, np.int64]
+

 class XPUTestElementwiseDivOp(XPUOpTestWrapper):
     def __init__(self):
@@ -40,6 +45,7 @@ def setUp(self):
             self.dtype = self.in_type
             self.init_dtype()
             self.use_xpu = True
+            self.init_shape()
             self.init_input_output()
             """ Warning
             CPU gradient check error!
@@ -47,20 +53,40 @@ def setUp(self):
             'Y': np.random.random((32,84)).astype("float32")
             """

+        def gen_data_depend_on_dtype(self, shape):
+            if self.dtype in INT_GROUP:
+                return np.random.randint(1, 100, size=shape)
+            else:
+                return np.random.uniform(-1, 1, size=shape)
+
+        def reshape_y_depend_on_x(self):
+            if len(self.x_shape) <= len(self.y_shape) or self.y_shape == []:
+                return self.y
+            reshape_dims = [
+                1 if i not in self.y_shape else i for i in self.x_shape
+            ]
+            return np.reshape(self.y, reshape_dims)
+
         def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
+            self.x = self.gen_data_depend_on_dtype(self.x_shape)
+            self.y = self.gen_data_depend_on_dtype(self.y_shape)
+            reshaped_y = self.reshape_y_depend_on_x()
+            if self.dtype == np.uint16:
+                self.outputs = {'Out': np.divide(self.x, reshaped_y)}
                 self.inputs = {
-                    'X': np.random.randint(1, 100, [13, 17]).astype(self.dtype),
-                    'Y': np.random.randint(1, 100, [13, 17]).astype(self.dtype),
+                    'X': convert_float_to_uint16(self.x),
+                    'Y': convert_float_to_uint16(self.y),
                 }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
             else:
                 self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
-                    'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
+                    'X': self.x.astype(self.dtype),
+                    'Y': self.y.astype(self.dtype),
                 }
+                reshaped_y = reshaped_y.astype(self.dtype)
                 self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
+                    'Out': self.inputs['X'] // reshaped_y
+                    if self.dtype in INT_GROUP
+                    else np.divide(self.inputs['X'], reshaped_y)
                 }

         def test_check_output(self):
@@ -100,306 +126,80 @@ def test_check_grad_ingore_y(self):
         def init_dtype(self):
             pass

+        def init_shape(self):
+            self.x_shape = [13, 17]
+            self.y_shape = [13, 17]
+
     class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, []).astype(self.dtype),
-                    'Y': np.random.randint(1, 100, []).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(-1, 1, []).astype(self.dtype),
-                    'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = []
+            self.y_shape = []

     class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [13, 17]).astype(self.dtype),
-                    'Y': np.random.randint(1, 100, []).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(-1, 1, [13, 17]).astype(self.dtype),
-                    'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [13, 17]
+            self.y_shape = []
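The new uint16 branch above encodes the bfloat16 convention used throughout these tests: reference outputs are computed from the float data first, and only the inputs are converted to bfloat16 bit patterns stored as uint16 arrays. A self-contained sketch of the idea (the real tests use op_test.convert_float_to_uint16; the helper below is an illustrative truncating stand-in):

```python
import numpy as np

def float_to_uint16_sketch(x):
    # bfloat16 is the top 16 bits of an IEEE float32; keep them, drop the rest.
    return (x.astype(np.float32).view(np.uint32) >> 16).astype(np.uint16)

x = np.random.uniform(-1, 1, [13, 17])
y = np.random.uniform(-1, 1, [13, 17])
outputs = {'Out': np.divide(x, y)}  # reference output stays float
inputs = {'X': float_to_uint16_sketch(x), 'Y': float_to_uint16_sketch(y)}
```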

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast."
    )
    class TestElementwiseDivOp_scalar(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [20, 3, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [1]).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [20, 3, 4]
+            self.y_shape = [1]

     class TestElementwiseDivOp_Vector(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [100]).astype(self.dtype),
-                    'Y': np.random.randint(1, 100, [100]).astype(self.dtype),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-                    'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
+        def init_shape(self):
+            self.x_shape = [100]
+            self.y_shape = [100]

     class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [100, 3, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': self.inputs['X']
-                    // self.inputs['Y'].reshape(100, 1, 1)
-                }
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': np.divide(
-                        self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-                    )
-                }
-
+        def init_shape(self):
+            self.x_shape = [100, 3, 4]
+            self.y_shape = [100]
             self.attrs = {'axis': 0}

     class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 100, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': self.inputs['X']
-                    // self.inputs['Y'].reshape(1, 100, 1)
-                }
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': np.divide(
-                        self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-                    )
-                }
-
+        def init_shape(self):
+            self.x_shape = [2, 100, 4]
+            self.y_shape = [100]
             self.attrs = {'axis': 1}

     class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 3, 100]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': self.inputs['X']
-                    // self.inputs['Y'].reshape(1, 1, 100)
-                }
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': np.divide(
-                        self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
-                    )
-                }
+        def init_shape(self):
+            self.x_shape = [2, 3, 100]
+            self.y_shape = [100]
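For the broadcast cases above, reshape_y_depend_on_x rebuilds the explicit reshape that each removed init_input_output spelled out by hand. A standalone illustration with the broadcast_1 shapes (note the helper matches by dimension size, so it assumes the sizes in y_shape occur unambiguously in x_shape):

```python
import numpy as np

x_shape, y_shape = [2, 100, 4], [100]
x = np.random.uniform(-1, 1, x_shape)
y = np.random.uniform(-1, 1, y_shape)
# Keep y's sizes where they appear in x_shape, pad with 1 elsewhere.
reshape_dims = [1 if i not in y_shape else i for i in x_shape]  # [1, 100, 1]
out = np.divide(x, np.reshape(y, reshape_dims))
assert out.shape == (2, 100, 4)
```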

    class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 10, 12, 5]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [10, 12]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': self.inputs['X']
-                    // self.inputs['Y'].reshape(1, 10, 12, 1)
-                }
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
-                }
-                self.outputs = {
-                    'Out': np.divide(
-                        self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1)
-                    )
-                }
-
+        def init_shape(self):
+            self.x_shape = [2, 10, 12, 5]
+            self.y_shape = [10, 12]
             self.attrs = {'axis': 1}

     class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 3, 50]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [2, 1, 50]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
+        def init_shape(self):
+            self.x_shape = [2, 3, 50]
+            self.y_shape = [2, 1, 50]

     class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 3, 4, 20]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [2, 3, 1, 20]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
+        def init_shape(self):
+            self.x_shape = [2, 3, 4, 20]
+            self.y_shape = [2, 3, 1, 20]

     class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [2, 3, 100]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [1, 1, 100]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
+        def init_shape(self):
+            self.x_shape = [2, 3, 100]
+            self.y_shape = [1, 1, 100]

    class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [30, 3, 1, 5]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.randint(1, 100, [30, 1, 4, 1]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(
-                        self.dtype
-                    ),
-                    'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
+        def init_shape(self):
+            self.x_shape = [30, 3, 1, 5]
+            self.y_shape = [30, 1, 4, 1]

     class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
-        def init_input_output(self):
-            if self.dtype == np.int32 or self.dtype == np.int64:
-                self.inputs = {
-                    'X': np.random.randint(1, 100, [10, 12]).astype(self.dtype),
-                    'Y': np.random.randint(1, 100, [2, 3, 10, 12]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']}
-            else:
-                self.inputs = {
-                    'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
-                    'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(
-                        self.dtype
-                    ),
-                }
-                self.outputs = {
-                    'Out': np.divide(self.inputs['X'], self.inputs['Y'])
-                }
-
+        def init_shape(self):
+            self.x_shape = [10, 12]
+            self.y_shape = [2, 3, 10, 12]
             self.attrs = {'axis': 2}

 class TestElementwiseDivBroadcast(unittest.TestCase):
diff --git a/test/xpu/test_elementwise_mul_op_xpu.py b/test/xpu/test_elementwise_mul_op_xpu.py
index 6bd604df07e40a..b8fda9a5b6217a 100644
--- a/test/xpu/test_elementwise_mul_op_xpu.py
+++ b/test/xpu/test_elementwise_mul_op_xpu.py
@@ -20,7 +20,10 @@
     create_test_class,
     get_xpu_op_support_types,
 )
-from op_test import OpTest, skip_check_grad_ci
+from op_test import (
+    convert_float_to_uint16,
+    skip_check_grad_ci,
+)
 from op_test_xpu import XPUOpTest

 import paddle
@@ -40,13 +43,34 @@ def init_kernel_type(self):
         def setUp(self):
             self.op_type = 'elementwise_mul'
             self.use_xpu = True
+            self.cal_x = None
+            self.cal_y = None
             self.dtype = self.in_type
             self.axis = -1
-            self.init_dtype()
+            self.init_data()
+            self.gen_output()
             self.init_input_output()
             self.init_kernel_type()
             self.init_axis()

+        def gen_output(self):
+            if self.cal_x is None:
+                self.cal_x = self.x
+            if self.cal_y is None:
+                self.cal_y = self.y
+            if self.dtype == np.uint16:
+                self.out = np.multiply(self.cal_x, self.cal_y)
+            else:
+                self.out = np.multiply(
+                    self.cal_x.astype(self.dtype), self.cal_y.astype(self.dtype)
+                )
+
+        def gen_data_depend_on_dtype(self, shape):
+            if self.dtype == np.int32 or self.dtype == np.int64:
+                return np.random.randint(1, 100, size=shape)
+            else:
+                return np.random.uniform(0.1, 1, size=shape)
+
         def test_check_output(self):
             if paddle.is_compiled_with_xpu():
                 place = paddle.XPUPlace(0)
@@ -84,158 +108,109 @@ def test_check_grad_ingore_y(self):
                     check_dygraph=False,
                 )

+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([13, 17])
+            self.y = self.gen_data_depend_on_dtype([13, 17])
+
         def init_input_output(self):
-            self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
-            self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
-            self.out = np.multiply(self.x, self.y)
+            if self.dtype == np.uint16:
+                self.x = convert_float_to_uint16(self.x)
+                self.y = convert_float_to_uint16(self.y)
+            else:
+                self.x = self.x.astype(self.dtype)
+                self.y = self.y.astype(self.dtype)
+
             self.inputs = {
-                'X': OpTest.np_dtype_to_base_dtype(self.x),
-                'Y': OpTest.np_dtype_to_base_dtype(self.y),
+                'X': self.x,
+                'Y': self.y,
             }
             self.outputs = {'Out': self.out}
             self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}

-        def init_dtype(self):
-            pass
-
         def init_axis(self):
             pass
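The mul refactor splits responsibilities: init_data generates raw operands (and, in broadcast subclasses, a pre-reshaped view in self.cal_x/self.cal_y plus the axis), gen_output computes the float reference from those views, and init_input_output only converts dtypes and packs the dicts. A standalone sketch of that flow with the broadcast_0 shapes:

```python
import numpy as np

# init_data
x = np.random.uniform(0.1, 1, [100, 2, 3])
y = np.random.uniform(0.1, 1, [100])
cal_y = y.reshape(100, 1, 1)  # broadcast view for axis=0
# gen_output: reference computed from the float views
out = np.multiply(x, cal_y)
assert out.shape == (100, 2, 3)
```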

    class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, []).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([])
+            self.y = self.gen_data_depend_on_dtype([])

     class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, [13, 17]).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([13, 17])
+            self.y = self.gen_data_depend_on_dtype([])

     class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, []).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, [13, 17]).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([])
+            self.y = self.gen_data_depend_on_dtype([13, 17])

     @skip_check_grad_ci(
         reason="[skip shape check] Use y_shape(1) to test broadcast."
     )
     class TestElementwiseMulOp_scalar(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 3, 4).astype(self.dtype),
-                'Y': np.random.rand(1).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([10, 3, 4])
+            self.y = self.gen_data_depend_on_dtype([1])

     class TestElementwiseMulOp_Vector(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.random((100,)).astype(self.dtype),
-                'Y': np.random.random((100,)).astype(self.dtype),
-            }
-            self.outputs = {
-                'Out': np.multiply(self.inputs['X'], self.inputs['Y'])
-            }
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([100])
+            self.y = self.gen_data_depend_on_dtype([100])

     class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(100, 2, 3).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-            self.outputs = {
-                'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1)
-            }
-            self.attrs = {'axis': 0}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([100, 2, 3])
+            self.y = self.gen_data_depend_on_dtype([100])
+            self.cal_y = self.y.reshape(100, 1, 1)
+            self.axis = 0

     class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 100, 3).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
-            }
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([2, 100, 3])
+            self.y = self.gen_data_depend_on_dtype([100])
+            self.cal_y = self.y.reshape(1, 100, 1)
+            self.axis = 1

     class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 3, 100).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-
-            self.outputs = {
-                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
-            }
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([2, 3, 100])
+            self.y = self.gen_data_depend_on_dtype([100])
+            self.cal_y = self.y.reshape(1, 1, 100)

    class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
-                'Y': np.random.rand(10, 12).astype(self.dtype),
-            }
-
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
-            }
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([2, 10, 12, 3])
+            self.y = self.gen_data_depend_on_dtype([10, 12])
+            self.cal_y = self.y.reshape(1, 10, 12, 1)
+            self.axis = 1

     class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 2, 11).astype(self.dtype),
-                'Y': np.random.rand(10, 1, 11).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([10, 2, 11])
+            self.y = self.gen_data_depend_on_dtype([10, 1, 11])

     class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 4, 2, 3).astype(self.dtype),
-                'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([10, 4, 2, 3])
+            self.y = self.gen_data_depend_on_dtype([10, 4, 1, 3])

     class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 3, 100).astype(self.dtype),
-                'Y': np.random.rand(1, 1, 100).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([2, 3, 100])
+            self.y = self.gen_data_depend_on_dtype([1, 1, 100])

     class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(30, 3, 1, 5).astype(self.dtype),
-                'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([30, 3, 1, 5])
+            self.y = self.gen_data_depend_on_dtype([30, 1, 4, 1])

     class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 10).astype(self.dtype),
-                'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype),
-            }
-
-            self.attrs = {'axis': 2}
-
-            self.outputs = {
-                'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
-            }
+        def init_data(self):
+            self.x = self.gen_data_depend_on_dtype([10, 10])
+            self.y = self.gen_data_depend_on_dtype([2, 2, 10, 10])
+            self.cal_x = self.x.reshape(1, 1, 10, 10)
+            self.axis = 2

 support_types = get_xpu_op_support_types('elementwise_mul')
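For context, the wrapper classes in these files are expanded into concrete per-dtype unit tests by the XPU test harness, which is how the new bfloat16 registrations get exercised. The pattern, as at the bottom of each of these files, using the create_test_class and get_xpu_op_support_types helpers imported at the top (the mul wrapper class name is inferred from the surrounding code):

```python
support_types = get_xpu_op_support_types('elementwise_mul')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMulOp, stype)
```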
diff --git a/test/xpu/test_elementwise_sub_op_xpu.py b/test/xpu/test_elementwise_sub_op_xpu.py
index 8e595932eae29d..3cb440f05de063 100644
--- a/test/xpu/test_elementwise_sub_op_xpu.py
+++ b/test/xpu/test_elementwise_sub_op_xpu.py
@@ -21,13 +21,18 @@
     create_test_class,
     get_xpu_op_support_types,
 )
-from op_test import skip_check_grad_ci
+from op_test import (
+    convert_float_to_uint16,
+    skip_check_grad_ci,
+)
 from op_test_xpu import XPUOpTest

 import paddle

 paddle.enable_static()

+INT_GROUP = [np.int32, np.int64]
+

 class XPUTestElementwiseSubOp(XPUOpTestWrapper):
     def __init__(self):
@@ -39,14 +44,43 @@ def setUp(self):
             self.op_type = "elementwise_sub"
             self.use_xpu = True
             self.dtype = self.in_type
+            self.init_shape()
             self.init_input_output()

+        def reshape_data(self, x, y):
+            if len(x.shape) < len(y.shape):
+                reshape_dims = [1 if i not in x.shape else i for i in y.shape]
+                return np.reshape(x, reshape_dims)
+            else:
+                return x
+
+        def gen_data_depend_on_dtype(self, shape):
+            if self.dtype in INT_GROUP:
+                return np.random.randint(1, 100, size=shape)
+            else:
+                return np.random.uniform(-1, 1, size=shape)
+
         def init_input_output(self):
+            self.x = self.gen_data_depend_on_dtype(self.x_shape)
+            self.y = self.gen_data_depend_on_dtype(self.y_shape)
+            if self.dtype == np.uint16:
+                tmp_x = self.reshape_data(self.x, self.y)
+                tmp_y = self.reshape_data(self.y, self.x)
+                self.outputs = {'Out': tmp_x - tmp_y}
+                self.x = convert_float_to_uint16(self.x)
+                self.y = convert_float_to_uint16(self.y)
+            else:
+                tmp_x = self.reshape_data(self.x, self.y).astype(self.dtype)
+                tmp_y = self.reshape_data(self.y, self.x).astype(self.dtype)
+                self.outputs = {'Out': tmp_x - tmp_y}
             self.inputs = {
-                'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
-                'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
+                'X': self.x,
+                'Y': self.y,
             }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+
+        def init_shape(self):
+            self.x_shape = [2, 3, 4, 5]
+            self.y_shape = [2, 3, 4, 5]

         def test_check_output(self):
             if paddle.is_compiled_with_xpu():
@@ -81,132 +115,77 @@ def test_check_grad_ingore_y(self):
                 )

     class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, []).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = []
+            self.y_shape = []

     class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, [13, 17]).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, []).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [13, 17]
+            self.y_shape = []

     class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(-1, 1, []).astype(self.dtype),
-                'Y': np.random.uniform(-1, 1, [13, 17]).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = []
+            self.y_shape = [13, 17]
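Unlike the div helper, the sub test applies reshape_data in both directions, so whichever operand has fewer dims gets padded with size-1 dims while the other passes through unchanged (matching by dimension size, with the same caveat as before). A standalone check with the shapes from TestElementwiseSubOp_xsize_lessthan_ysize further down:

```python
import numpy as np

x = np.random.uniform(-1, 1, [10, 12])
y = np.random.uniform(-1, 1, [2, 3, 10, 12])
# x has fewer dims, so it is padded; y passes through unchanged.
reshape_dims = [1 if i not in x.shape else i for i in y.shape]  # [1, 1, 10, 12]
out = np.reshape(x, reshape_dims) - y
assert out.shape == (2, 3, 10, 12)
```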

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast."
    )
    class TestElementwiseSubOp_scalar(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 3, 4).astype(self.dtype),
-                'Y': np.random.rand(1).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [10, 3, 4]
+            self.y_shape = [1]

     class TestElementwiseSubOp_Vector(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.random((100,)).astype(self.dtype),
-                'Y': np.random.random((100,)).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [100]
+            self.y_shape = [100]

     class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(100, 3, 2).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-
+        def init_shape(self):
+            self.x_shape = [100, 3, 2]
+            self.y_shape = [100]
             self.attrs = {'axis': 0}
-            self.outputs = {
-                'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
-            }

     class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 100, 3).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-
+        def init_shape(self):
+            self.x_shape = [2, 100, 3]
+            self.y_shape = [100]
             self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
-            }

     class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 3, 100).astype(self.dtype),
-                'Y': np.random.rand(100).astype(self.dtype),
-            }
-
-            self.outputs = {
-                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
-            }
+        def init_shape(self):
+            self.x_shape = [2, 3, 100]
+            self.y_shape = [100]

     class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
-                'Y': np.random.rand(10, 12).astype(self.dtype),
-            }
-
+        def init_shape(self):
+            self.x_shape = [2, 10, 12, 3]
+            self.y_shape = [10, 12]
             self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
-            }

     class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 5, 3, 12).astype(self.dtype),
-                'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [2, 5, 3, 12]
+            self.y_shape = [2, 5, 1, 12]

     class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 3, 100).astype(self.dtype),
-                'Y': np.random.rand(1, 1, 100).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [2, 3, 100]
+            self.y_shape = [1, 1, 100]

     class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 3, 1, 4).astype(self.dtype),
-                'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype),
-            }
-            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        def init_shape(self):
+            self.x_shape = [10, 3, 1, 4]
+            self.y_shape = [10, 1, 12, 1]

    class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(10, 12).astype(self.dtype),
-                'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype),
-            }
-
+        def init_shape(self):
+            self.x_shape = [10, 12]
+            self.y_shape = [2, 3, 10, 12]
             self.attrs = {'axis': 2}
-            self.outputs = {
-                'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
-            }
-

 support_types = get_xpu_op_support_types('elementwise_sub')
 for stype in support_types:
diff --git a/test/xpu/test_reduce_sum_op_xpu.py b/test/xpu/test_reduce_sum_op_xpu.py
index 06c62d29fb263d..cbf144c923bcba 100644
--- a/test/xpu/test_reduce_sum_op_xpu.py
+++ b/test/xpu/test_reduce_sum_op_xpu.py
@@ -20,6 +20,7 @@
     create_test_class,
     get_xpu_op_support_types,
 )
+from op_test import convert_float_to_uint16
 from op_test_xpu import XPUOpTest

 import paddle
@@ -38,6 +39,16 @@ def setUp(self):
             self.init_case()
             self.set_case()

+        def gen_data_depend_on_dtype(self, shape):
+            if (
+                self.dtype == np.int32
+                or self.dtype == np.int64
+                or self.dtype == np.uint8
+            ):
+                return np.random.randint(1, 100, size=shape)
+            else:
+                return np.random.uniform(-1, 1, size=shape)
+
         def set_case(self):
             self.op_type = 'reduce_sum'
             self.attrs = {
@@ -46,17 +57,29 @@ def set_case(self):
                 'keep_dim': self.keep_dim,
                 'dim': self.axis,
             }
-            self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)}
-            if self.attrs['reduce_all']:
-                self.outputs = {'Out': self.inputs['X'].sum()}
+            tmp_x = self.gen_data_depend_on_dtype(self.shape)
+            if self.dtype == np.uint16:
+                tmp_out = (
+                    tmp_x.sum()
+                    if self.attrs['reduce_all']
+                    else tmp_x.sum(
+                        axis=self.axis, keepdims=self.attrs['keep_dim']
+                    )
+                )
+                self.outputs = {'Out': tmp_out}
+                tmp_x = convert_float_to_uint16(tmp_x)
+                self.inputs = {'X': tmp_x}
             else:
-                self.outputs = {
-                    'Out': self.inputs['X'].sum(
+                tmp_x = tmp_x.astype(self.dtype)
+                self.inputs = {'X': tmp_x}
+                tmp_out = (
+                    tmp_x.sum()
+                    if self.attrs['reduce_all']
+                    else tmp_x.sum(
                         axis=self.axis, keepdims=self.attrs['keep_dim']
                     )
-                }
-            if self.dtype == np.uint16:
-                self.outputs['Out'] = self.outputs['Out'].astype(np.uint16)
+                )
+                self.outputs = {'Out': tmp_out}

         def init_case(self):
             self.shape = (5, 6, 10)
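The reduce_sum change follows the same recipe as the elementwise tests: generate data by dtype, take the reference sum from the float array, and convert only the input to the bfloat16 bit pattern. The old code instead cast the float output numerically with astype(np.uint16), which is not the bfloat16 encoding. A condensed sketch with the default init_case shape (reusing the same truncating stand-in for op_test.convert_float_to_uint16 as in the earlier example):

```python
import numpy as np

def float_to_uint16_sketch(x):
    # Truncating stand-in for op_test.convert_float_to_uint16.
    return (x.astype(np.float32).view(np.uint32) >> 16).astype(np.uint16)

tmp_x = np.random.uniform(-1, 1, (5, 6, 10))
tmp_out = tmp_x.sum(axis=(0,), keepdims=False)  # reference before conversion
inputs = {'X': float_to_uint16_sketch(tmp_x)}
outputs = {'Out': tmp_out}
```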