Skip to content

Commit

Permalink
【New IR】New IR op test v1.1 (sum passed) (#56756)
Browse files Browse the repository at this point in the history
* add reference of lbfgs

* add reference of lbfgs

* new ir op test v1.0

* fix new ir optest bug1.0

* modify two test-case bugs

* add new ir white list & pass test_mean_op.py

* rename white list

* add new_ir_guard

* new ir sum op test all pass

* rename backward.grad as ir_backward.grad

* check place for new ir

* fix test_build_model env bug

* fix test_prim_program backward bug

* change backward to ir_backward in check_appr

* add check_new_ir flag for mkldnn

* clean

---------

Co-authored-by: wangruting <wangruting@baidu.com>
  • Loading branch information
changeyoung98 and xiaoguoguo626807 authored Sep 3, 2023
1 parent d74bfef commit e2af9d5
Show file tree
Hide file tree
Showing 6 changed files with 23 additions and 6 deletions.
7 changes: 5 additions & 2 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -1510,8 +1510,11 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):

dtype_flag = False
if dtype is not None:
dtype_flag = True
dtype = convert_np_dtype_to_dtype_(dtype)
if paddle.ir.core._use_new_ir_api():
dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
else:
dtype_flag = True
dtype = convert_np_dtype_to_dtype_(dtype)

if in_dynamic_mode():
return _C_ops.sum(x, axis, dtype, keepdim)
Expand Down
8 changes: 8 additions & 0 deletions test/legacy_test/eager_op_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -1927,6 +1927,7 @@ def check_output_with_place(
only_check_prim=False,
inplace_atol=None,
check_cinn=False,
check_new_ir=True,
):
core._set_prim_all_enabled(False)
core.set_prim_eager_enabled(False)
Expand Down Expand Up @@ -2455,6 +2456,7 @@ def _is_skip_name(self, name):
if (
self.op_type
in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
and check_new_ir
):
if (
type(place) is paddle.fluid.libpaddle.CPUPlace
Expand Down Expand Up @@ -2576,6 +2578,7 @@ def check_output(
inplace_atol=None,
check_cinn=False,
only_check_prim=False,
check_new_ir=True,
):
self.__class__.op_type = self.op_type
if self.is_mkldnn_op():
Expand All @@ -2600,6 +2603,7 @@ def check_output(
only_check_prim=only_check_prim,
inplace_atol=inplace_atol,
check_cinn=check_cinn,
check_new_ir=check_new_ir,
)
if not res and only_check_prim:
continue
Expand Down Expand Up @@ -2766,6 +2770,7 @@ def check_grad(
only_check_prim=False,
atol=1e-5,
check_cinn=False,
check_new_ir=True,
):
if hasattr(self, "use_custom_device") and self.use_custom_device:
check_dygraph = False
Expand All @@ -2788,6 +2793,7 @@ def check_grad(
only_check_prim=only_check_prim,
atol=atol,
check_cinn=check_cinn,
check_new_ir=check_new_ir,
)

def check_grad_with_place(
Expand All @@ -2807,6 +2813,7 @@ def check_grad_with_place(
numeric_place=None,
atol=1e-5,
check_cinn=False,
check_new_ir=True,
):
if hasattr(self, "use_custom_device") and self.use_custom_device:
check_dygraph = False
Expand Down Expand Up @@ -3007,6 +3014,7 @@ def check_grad_with_place(
if (
self.op_type
in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
and check_new_ir
):
if (
type(place) is paddle.fluid.libpaddle.CPUPlace
Expand Down
6 changes: 5 additions & 1 deletion test/legacy_test/test_reduce_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -1180,7 +1180,11 @@ def setUp(self):


def reduce_sum_wrapper2(x, axis=None, dtype=None, keepdim=False):
    """Dispatch ``sum`` to the backend matching the current execution mode.

    Args:
        x: Input tensor to reduce.
        axis: Axes to reduce over; defaults to ``[0]``. A ``None`` sentinel
            replaces the original mutable default ``[0]`` (shared-list
            pitfall) while preserving the effective default.
        dtype: Optional output dtype forwarded to the underlying op.
        keepdim: Whether reduced dimensions are kept with length 1.

    Returns:
        The reduced tensor, or ``None`` in legacy static-graph mode (neither
        dynamic mode nor the new IR API) — preserved from the original,
        which also fell through without a return in that case.
    """
    if axis is None:
        axis = [0]
    if paddle.in_dynamic_mode():
        return paddle._C_ops.sum(x, axis, dtype, keepdim)
    if paddle.ir.core._use_new_ir_api():
        return paddle._ir_ops.sum(x, axis, dtype, keepdim)
    # NOTE(review): silently returns None here; presumably the wrapper is
    # only exercised in the two modes above — confirm against callers.


class Test8DReduce0(Test1DReduce):
Expand Down
3 changes: 2 additions & 1 deletion test/mkldnn/test_reduce_bf16_mkldnn_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def setUp(self):
self.attrs = {'use_mkldnn': self.use_mkldnn}

def test_check_output(self):
    # Dygraph and new-IR cross-checks are both disabled for this oneDNN
    # (bf16) kernel test; only the static-graph output is verified.
    self.check_output(check_dygraph=False, check_new_ir=False)

def calculate_grads(self):
tmp_tensor = np.zeros(self.x_fp32.shape).astype("float32")
Expand Down Expand Up @@ -84,6 +84,7 @@ def test_check_grad(self):
check_dygraph=False,
user_defined_grads=[self.grad_X],
user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)],
check_new_ir=False,
)


Expand Down
4 changes: 2 additions & 2 deletions test/mkldnn/test_reduce_mkldnn_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,12 @@ def setUp(self):
self.attrs = {'use_mkldnn': self.use_mkldnn}

def test_check_output(self):
    # Skip the dygraph and new-IR cross-checks for this oneDNN kernel;
    # validate the static-graph output only.
    self.check_output(check_dygraph=False, check_new_ir=False)


class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
    """Variant of the default reduce-sum oneDNN test that also checks grads."""

    def test_check_grad(self):
        # Gradient of 'Out' w.r.t. 'X'; dygraph and new-IR checks are
        # skipped for this oneDNN kernel.
        self.check_grad(['X'], 'Out', check_dygraph=False, check_new_ir=False)


class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
Expand Down
1 change: 1 addition & 0 deletions test/white_list/new_ir_python_api_grad_white_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@

# Op types whose gradients are additionally verified through the new-IR
# Python API path; op tests gate that extra check on membership in this
# list (together with their check_new_ir flag).
new_ir_python_api_grad_white_list = [
    "mean",
    "reduce_sum",
]

0 comments on commit e2af9d5

Please sign in to comment.