Unverified commit d43e8433, authored by xiongkun, committed by GitHub

[OpTest] Polish optest (#40879)

* 1. Add the Python API grad check; 2. add VLOGs for the final and intermediate state; 3. change the python_api error logic

* Add the python_api or turn off check_eager=True where it is not available

* Fix compatibility issues

* matmul

* Disable unit tests: test_elementwise_add_op, test_scatter_nd_op, test_gather_nd_op, test_scatter_op, test_index_sample_op, test_elementwise_add_mkldnn_op

* Refine the prepara_parameter logic

* Fix a segmentation fault when converting a GPU Tensor to a Scalar

* Add multi-attribute support (test_unsqueeze_op); add python_out_sig for customizing the op's output signature (a usage sketch follows the commit metadata below)

* Fix some bugs; support python_out_sig
Parent 65478332
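The eager-check path these changes polish is driven from the test side: a test declares a `python_api`, optionally a `python_out_sig` when that API returns fewer outputs than the kernel signature, and calls `check_output(check_eager=True)`. A minimal sketch, loosely modeled on `test_squeeze2_op`; the import path, shapes, and values here are illustrative, not taken from this diff:

```python
import numpy as np
import paddle
from op_test import OpTest  # assumed import path, as used under python/paddle/fluid/tests/unittests


class TestSqueeze2EagerCheck(OpTest):
    def setUp(self):
        self.op_type = "squeeze2"
        # The eager-mode Python API that should reproduce the kernel's result.
        self.python_api = paddle.squeeze
        # paddle.squeeze() returns only the squeezed Tensor, while the squeeze2
        # kernel also emits "XShape", so restrict the checked outputs to "Out".
        self.python_out_sig = ["Out"]
        self.inputs = {"X": np.random.random((1, 3, 1, 40)).astype("float64")}
        self.attrs = {"axes": [0, 2]}
        self.outputs = {
            "Out": self.inputs["X"].reshape((3, 40)),
            # placeholder value; XShape is excluded from the check below
            "XShape": np.random.random((1, 3, 1, 40)).astype("float64"),
        }

    def test_check_output(self):
        # check_eager=True routes the test through the Python-API path touched
        # in this commit; XShape is an intermediate output and is skipped.
        self.check_output(no_check_set=["XShape"], check_eager=True)
```

Without `python_out_sig`, the output-dict construction in the hunks below would expect two outputs (`Out`, `XShape`) for the single Tensor returned by `paddle.squeeze()` and trip the multi-output assertion.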
```diff
@@ -731,12 +731,14 @@ class OpTest(unittest.TestCase):
             if name in op_proto_attrs:
                 return op_proto_attrs[name]
             elif name in op_inputs:
-                assert op_inputs[name].__len__(
-                ) == 1, "currently don't support multi-input in attribute."
-                # why don't use numpy().item() : if the Tensor is float64, we will change it to python.float32, where we loss accuracy: [allclose_op]
-                # why we reconstruct a tensor: because we want the tensor in cpu.
-                return paddle.to_tensor(
-                    op_inputs[name][0].numpy(), place='cpu')
+                if len(op_inputs[name]) == 1:
+                    # why not numpy().item(): if the Tensor is float64, it would be converted to float32 and lose accuracy (see allclose_op).
+                    # why we reconstruct a tensor: because we want the tensor on the CPU.
+                    return paddle.to_tensor(
+                        op_inputs[name][0].numpy(), place='cpu')
+                else:
+                    # if this is a list (test_unsqueeze2_op), just pass it to the Python API.
+                    return op_inputs[name]
             else:
                 return Empty()
```
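The new branch distinguishes two shapes an attribute-like input can take. A toy illustration (the variable names are made up; only `paddle.to_tensor(..., place='cpu')` comes from the code above):

```python
import numpy as np
import paddle

# Case 1: a single-element Tensor standing in for a scalar attribute.
# It is rebuilt on the CPU from its numpy buffer, so the float64 value keeps
# its precision and no longer depends on the original (possibly GPU) place.
rtol_input = [paddle.to_tensor(np.array([1e-5], dtype="float64"))]
rtol_attr = paddle.to_tensor(rtol_input[0].numpy(), place="cpu")

# Case 2: a list of Tensors standing in for a list attribute, e.g. the axes
# of unsqueeze2. The whole list is handed to the Python API unchanged.
axes_input = [paddle.to_tensor(0), paddle.to_tensor(2)]
axes_attr = axes_input
```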
```diff
@@ -786,6 +788,8 @@ class OpTest(unittest.TestCase):
             return results

         def construct_output_dict_by_kernel_sig(ret_tuple, output_sig):
+            if hasattr(self, "python_out_sig"):
+                output_sig = self.python_out_sig
             if not isinstance(ret_tuple, (tuple, list)):
                 ret_tuple = [ret_tuple]
             if len(output_sig) == len(ret_tuple):
```
```diff
@@ -795,7 +799,7 @@ class OpTest(unittest.TestCase):
                 # [assumption]: return multi-Tensor in a single output, such as paddle.split()
                 assert len(
                     output_sig
-                ) == 1, "Don't support multi-output with multi-tensor output."
+                ) == 1, "Don't support multi-output with multi-tensor output. (Maybe you can set `python_out_sig`; see `test_squeeze2_op` for an example.)"
                 return {output_sig[0]: ret_tuple}

         def assumption_assert_and_transform(args, inp_num):
```
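Taken together with the previous hunk, the output dict is assembled roughly as follows. The body of the equal-length branch is elided in this diff, so the positional pairing below is an assumption rather than a copy of op_test.py:

```python
# Minimal sketch of construct_output_dict_by_kernel_sig's behavior.
def build_output_dict(ret, output_sig, python_out_sig=None):
    if python_out_sig is not None:
        # test-level override, mirroring `self.python_out_sig`
        output_sig = python_out_sig
    if not isinstance(ret, (tuple, list)):
        ret = [ret]
    if len(output_sig) == len(ret):
        return dict(zip(output_sig, ret))  # assumed pairing by position
    # a single named output carrying several Tensors, e.g. paddle.split()
    assert len(output_sig) == 1, "set python_out_sig for extra kernel outputs"
    return {output_sig[0]: ret}


# squeeze2: kernel outputs are ["Out", "XShape"], but paddle.squeeze() returns
# one Tensor, so python_out_sig=["Out"] maps it cleanly:
# build_output_dict(squeezed, ["Out", "XShape"], ["Out"]) -> {"Out": squeezed}
```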
```diff
@@ -825,6 +829,9 @@ class OpTest(unittest.TestCase):
             """ we think the kernel_sig is missing.
             """
             kernel_sig = None
+            print(
+                "[Warning: op_test.py] Kernel Signature is not found for %s, fall back to intermediate state."
+                % self.op_type)
             return kernel_sig

         def cal_python_api(python_api, args, kernel_sig):
```
```diff
@@ -1942,15 +1949,17 @@ class OpTest(unittest.TestCase):
                     attrs_outputs[attrs_name] = self.attrs[attrs_name]

             if check_eager:
-                outputs = self._calc_python_api_output(place, inputs, outputs)
+                eager_outputs = self._calc_python_api_output(place, inputs,
+                                                             outputs)

-            # if outputs is None, kernel sig is empty or other error is happens.
-            if not check_eager or outputs is None:
+            # if eager_outputs is None, the kernel signature is missing or some other error happened.
+            if not check_eager or eager_outputs is None:
                 block.append_op(
                     type=self.op_type,
                     inputs=inputs,
                     outputs=outputs,
                     attrs=attrs_outputs if hasattr(self, "attrs") else None)
+            else:
+                outputs = eager_outputs

             if self.dtype == np.uint16:
                 cast_inputs = self._find_var_in_dygraph(outputs,
...
```