Skip to content

Commit

Permalink
[OpTest] Polish optest (#40879)
Browse files Browse the repository at this point in the history
* 1. add the python api grad 2. add final and intermediate state vlog 3. change the python_api error logic

* add the Python API, or otherwise disable check_eager=True

* fix the compatibility

* matmul

* disable unittests: test_elementwise_add_op test_scatter_nd_op test_gather_nd_op test_scatter_op test_index_sample_op test_elementwise_add_mkldnn_op

* refine the prepare_parameter logic

* fix Tensor(gpu)-to-Scalar segmentation fault.

* add multi-attribute support (test_unsqueeze_op); add python_out_sig for customizing an op's output signature

* fix some bugs, support python_out_sig
  • Loading branch information
2742195759 authored Mar 25, 2022
1 parent 6547833 commit d43e843
Showing 1 changed file with 19 additions and 10 deletions.
29 changes: 19 additions & 10 deletions python/paddle/fluid/tests/unittests/op_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -731,12 +731,14 @@ def parse_attri_value(name, op_inputs, op_attrs):
if name in op_proto_attrs:
return op_proto_attrs[name]
elif name in op_inputs:
assert op_inputs[name].__len__(
) == 1, "currently don't support multi-input in attribute."
# why don't use numpy().item() : if the Tensor is float64, we will change it to python.float32, where we loss accuracy: [allclose_op]
# why we reconstruct a tensor: because we want the tensor in cpu.
return paddle.to_tensor(
op_inputs[name][0].numpy(), place='cpu')
if len(op_inputs[name]) == 1:
# why don't use numpy().item() : if the Tensor is float64, we will change it to python.float32, where we loss accuracy: [allclose_op]
# why we reconstruct a tensor: because we want the tensor in cpu.
return paddle.to_tensor(
op_inputs[name][0].numpy(), place='cpu')
else:
# if this is a list (test_unsqueeze2_op): we just pass it into the python api.
return op_inputs[name]
else:
return Empty()

Expand Down Expand Up @@ -786,6 +788,8 @@ def parse_attri_value(name, op_inputs, op_attrs):
return results

def construct_output_dict_by_kernel_sig(ret_tuple, output_sig):
if hasattr(self, "python_out_sig"):
output_sig = self.python_out_sig
if not isinstance(ret_tuple, (tuple, list)):
ret_tuple = [ret_tuple]
if len(output_sig) == len(ret_tuple):
Expand All @@ -795,7 +799,7 @@ def construct_output_dict_by_kernel_sig(ret_tuple, output_sig):
# [assumption]: return multi-Tensor in a single output. such as paddle.split()
assert len(
output_sig
) == 1, "Don't support multi-output with multi-tensor output."
) == 1, "Don't support multi-output with multi-tensor output. (May be you can use set `python_out_sig`, see `test_squeeze2_op` as a example.)"
return {output_sig[0]: ret_tuple}

def assumption_assert_and_transform(args, inp_num):
Expand Down Expand Up @@ -825,6 +829,9 @@ def _get_kernel_signature(eager_tensor_inputs, eager_tensor_outputs,
""" we think the kernel_sig is missing.
"""
kernel_sig = None
print(
"[Warning: op_test.py] Kernel Signature is not found for %s, fall back to intermediate state."
% self.op_type)
return kernel_sig

def cal_python_api(python_api, args, kernel_sig):
Expand Down Expand Up @@ -1942,15 +1949,17 @@ def _get_dygraph_grad(self,
attrs_outputs[attrs_name] = self.attrs[attrs_name]

if check_eager:
outputs = self._calc_python_api_output(place, inputs, outputs)

eager_outputs = self._calc_python_api_output(place, inputs,
outputs)
# if outputs is None, kernel sig is empty or other error is happens.
if not check_eager or outputs is None:
if not check_eager or eager_outputs is None:
block.append_op(
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=attrs_outputs if hasattr(self, "attrs") else None)
else:
outputs = eager_outputs

if self.dtype == np.uint16:
cast_inputs = self._find_var_in_dygraph(outputs,
Expand Down

0 comments on commit d43e843

Please sign in to comment.