Unverified commit feabed13, authored by J juncaipeng, committed by GitHub

Modify op test (#21835)

* remove return in op_test teardown, test=develop
* add check_grad for fp64 and other types (a minimal sketch follows below)
* update white list
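
For reference, a minimal sketch of what an op test satisfying the new fp64 rule could look like. TestExampleOp, the op name example_op, and the input shape are hypothetical and for illustration only; check_output and check_grad are the OpTest methods already used throughout this suite, and the (2, 128) input is meant to satisfy the shape-size check added in op_test.py:

import numpy as np
from op_test import OpTest


class TestExampleOp(OpTest):
    def setUp(self):
        # "example_op" is a hypothetical op name used for illustration only
        self.op_type = "example_op"
        self.dtype = np.float64  # fp64, so the new fp64 check_grad rule applies
        x = np.random.random((2, 128)).astype(self.dtype)
        self.inputs = {'X': x}
        self.outputs = {'Out': 2.0 * x}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # an fp64 check_grad call is exactly what the new
        # tearDownClass logic in op_test.py verifies happened
        self.check_grad(['X'], 'Out')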
Parent 3e1404d2
@@ -22,9 +22,13 @@ from paddle.fluid.tests.unittests.test_elementwise_mul_op import *
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive
from paddle.fluid.tests.unittests.mkldnn.mkldnn_op_test import __assert_close
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
# For UT coverage, integrate conv2d + elementwise_mul so that nchw16C can be chosen automatically when the MKL-DNN kernel is enabled
@skip_check_grad_ci(
reason="TODO: this test cannot use white list to skip check_grad, need to add check_grad."
)
class TestElementwiseMulMKLDNNOp_Integrated_With_Convs(ElementwiseMulOp):
def setUp(self):
self.dtype = np.float32
......
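
The skip_check_grad_ci decorator applied above is imported from op_test. A minimal sketch of what it presumably does, assuming it merely tags the class with the no_need_check_grad attribute that tearDownClass inspects in the op_test.py hunk below and insists on a written reason:

def skip_check_grad_ci(reason=None):
    """Mark an OpTest subclass so that the check_grad requirement
    enforced in tearDownClass is skipped; a written reason is mandatory."""
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        cls.no_need_check_grad = True
        return cls

    return wrapper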
@@ -198,33 +198,31 @@ class OpTest(unittest.TestCase):
"This test do not have op_type in class attrs,"
" please set self.__class__.op_type=the_real_op_type manually.")
if hasattr(
get_numeric_gradient, 'check_shape_time'
) and get_numeric_gradient.check_shape_time == 0 and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
# cases in NO_FP64_CHECK_GRAD_CASES and ops in NO_FP64_CHECK_GRAD_OP_LIST should be fixed
if not hasattr(cls, "no_need_check_grad") \
and cls.op_type not in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
if cls.dtype is None or \
(cls.dtype in [np.float16, np.int64, np.int32, np.int16] \
and cls.op_type not in op_accuracy_white_list.NO_CHECK_GRAD_OP_LIST \
and not hasattr(cls, "exist_check_grad")):
raise AssertionError("This test of %s op needs check_grad." %
cls.op_type)
if cls.dtype in [np.float32, np.float64] \
and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \
and not hasattr(cls, 'exist_fp64_check_grad'):
raise AssertionError(
"This test of %s op needs check_grad with fp64 precision." %
cls.op_type)
if hasattr(get_numeric_gradient, 'check_shape_time') \
and get_numeric_gradient.check_shape_time == 0 \
and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST \
and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
raise AssertionError(
"At least one input's shape should be large than or equal to 100 for "
+ OpTest.op_type + " Op.")
# cases and ops that do not need check_grad
if hasattr(cls, "no_need_check_grad") \
or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
return
# In order to pass CI, cases in NO_FP64_CHECK_GRAD_CASES and ops in
# NO_FP64_CHECK_GRAD_OP_LIST should be fixed
if cls.op_type in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST:
return
if cls.dtype is None or (cls.dtype in [np.float16, np.int64, np.int32, np.int16] \
and not hasattr(cls, "exist_check_grad")):
raise AssertionError("This test of %s op needs check_grad." %
cls.op_type)
if cls.dtype in [np.float32, np.float64] and \
not hasattr(cls, 'exist_fp64_check_grad'):
raise AssertionError("This test of %s op needs fp64 check_grad." %
cls.op_type)
def try_call_once(self, data_type):
if not self.call_once:
self.call_once = True
......
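
tearDownClass can only observe whether check_grad actually ran through class attributes, so check_grad itself has to record that fact. A simplified sketch of that handshake, assuming the real check_grad implementation sets the exist_check_grad and exist_fp64_check_grad flags tested above:

import numpy as np


class OpTest(object):  # simplified stand-in for the real OpTest class
    def check_grad_with_place(self, place, inputs_to_check, output_names):
        # ... numeric vs. analytic gradient comparison elided ...
        # record on the class that check_grad ran, so tearDownClass can
        # enforce the new rules after all test methods have finished
        self.__class__.exist_check_grad = True
        if self.dtype == np.float64:
            self.__class__.exist_fp64_check_grad = True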
@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
import paddle.fluid as fluid
@@ -315,6 +315,7 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
self.align_mode = 1
@skip_check_grad_ci(reason="uint8 type only be used in test and inference.")
class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
def init_test_case(self):
self.interp_method = 'bilinear'
......
@@ -180,6 +180,7 @@ class TestEmbedOpError(unittest.TestCase):
fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestLookupTableOpInt8(OpTest):
def setUp(self):
self.op_type = "lookup_table"
@@ -199,6 +200,7 @@ class TestLookupTableOpInt8(OpTest):
pass
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestLookupTableOpWithTensorIdsInt8(OpTest):
def setUp(self):
self.op_type = "lookup_table"
@@ -218,6 +220,7 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest):
pass
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
def test_check_output(self):
ids = np.squeeze(self.inputs['Ids'])
@@ -232,6 +235,7 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
pass
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestLookupTableOpWithTensorIdsAndPaddingInt8(
TestLookupTableOpWithTensorIdsInt8):
def test_check_output(self):
......
@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
import paddle.fluid as fluid
@@ -278,6 +278,7 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
self.align_corners = True
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
def init_test_case(self):
self.interp_method = 'nearest'
......
@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
import paddle.fluid as fluid
@@ -381,6 +381,7 @@ class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8):
self.align_mode = 1
@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8):
def init_test_case(self):
self.interp_method = 'trilinear'
......
@@ -14,40 +14,28 @@
# For each op in NO_FP64_CHECK_GRAD_OP_LIST, the op test still needs a check_grad with fp64 precision; entries here are temporarily exempted and should be fixed
NO_FP64_CHECK_GRAD_OP_LIST = [
'abs', 'acos', 'add_position_encoding', 'affine_grid', 'asin', 'atan',
'bilinear_interp', 'bilinear_tensor_product', 'brelu', 'center_loss',
'clip', 'concat', 'conv2d', 'conv2d_transpose', 'conv3d',
'conv3d_transpose', 'conv_shift', 'cos', 'cos_sim', 'crop', 'crop_tensor',
'cross_entropy', 'cross_entropy2', 'cudnn_lstm', 'cvm', 'data_norm',
'deformable_conv', 'deformable_conv_v1', 'deformable_psroi_pooling',
'depthwise_conv2d', 'depthwise_conv2d_transpose', 'dropout',
'elementwise_add', 'elementwise_div', 'elementwise_max', 'elementwise_min',
'elementwise_mul', 'elementwise_pow', 'elementwise_sub', 'elu', 'exp',
'expand', 'flatten', 'flatten2', 'fused_elemwise_activation',
'fused_embedding_seq_pool', 'gather', 'gather_nd', 'gelu', 'grid_sampler',
'group_norm', 'hard_shrink', 'hard_sigmoid', 'hard_swish',
'hierarchical_sigmoid', 'hinge_loss', 'huber_loss', 'im2sequence',
'increment', 'kldiv_loss', 'l1_norm', 'leaky_relu', 'lod_reset', 'log',
'log_loss', 'logsigmoid', 'lookup_table', 'lookup_table_v2', 'lrn',
'affine_grid', 'clip', 'conv2d', 'conv2d_transpose', 'conv3d',
'conv3d_transpose', 'conv_shift', 'cos_sim', 'cudnn_lstm', 'cvm',
'data_norm', 'deformable_conv', 'deformable_conv_v1',
'deformable_psroi_pooling', 'depthwise_conv2d',
'depthwise_conv2d_transpose', 'dropout', 'elementwise_max',
'fused_elemwise_activation', 'hierarchical_sigmoid', 'hinge_loss',
'huber_loss', 'im2sequence', 'increment', 'l1_norm', 'log_loss', 'lrn',
'margin_rank_loss', 'match_matrix_tensor', 'matmul',
'max_pool2d_with_index', 'max_pool3d_with_index', 'maxout', 'mean', 'minus',
'modified_huber_loss', 'mul', 'multiplex', 'nce', 'nearest_interp', 'pad',
'pad2d', 'pad_constant_like', 'pixel_shuffle', 'pool2d', 'pool3d', 'pow',
'prelu', 'prroi_pool', 'psroi_pool', 'rank_loss', 'reciprocal',
'reduce_max', 'reduce_min', 'relu', 'relu6', 'reshape2', 'reverse',
'roi_align', 'roi_perspective_transform', 'roi_pool', 'row_conv', 'rsqrt',
'scale', 'scatter', 'scatter_nd_add', 'seed', 'selu', 'sequence_concat',
'sequence_conv', 'sequence_expand', 'sequence_expand_as', 'sequence_pad',
'sequence_pool', 'sequence_reshape', 'sequence_reverse', 'sequence_scatter',
'sequence_slice', 'sequence_softmax', 'sequence_topk_avg_pooling',
'sequence_unpad', 'shuffle_channel', 'sigmoid',
'sigmoid_cross_entropy_with_logits', 'sigmoid_focal_loss', 'sign', 'sin',
'slice', 'smooth_l1_loss', 'soft_relu', 'softmax', 'softshrink', 'softsign',
'space_to_depth', 'spectral_norm', 'split', 'spp', 'sqrt', 'square',
'squared_l2_distance', 'squared_l2_norm', 'squeeze', 'squeeze2', 'stack',
'stanh', 'strided_slice', 'swish', 'tanh', 'tanh_shrink',
'teacher_student_sigmoid_loss', 'temporal_shift', 'thresholded_relu',
'transpose2', 'tree_conv', 'trilinear_interp', 'unfold', 'unpool',
'unsqueeze', 'unsqueeze2', 'unstack', 'var_conv_2d', 'warpctc',
'yolov3_loss'
'max_pool2d_with_index', 'max_pool3d_with_index', 'maxout', 'minus',
'modified_huber_loss', 'mul', 'nce', 'pad', 'pad2d', 'pad_constant_like',
'pool2d', 'pool3d', 'prelu', 'prroi_pool', 'rank_loss', 'reduce_max',
'reduce_min', 'relu', 'reshape2', 'roi_perspective_transform', 'row_conv',
'scale', 'scatter', 'sequence_conv', 'sequence_pool', 'sequence_reverse',
'sequence_slice', 'sequence_topk_avg_pooling', 'shuffle_channel', 'sigmoid',
'smooth_l1_loss', 'softmax', 'spectral_norm', 'spp', 'sqrt',
'squared_l2_distance', 'squared_l2_norm', 'tanh', 'transpose2',
'trilinear_interp', 'var_conv_2d', 'warpctc'
]
NO_CHECK_GRAD_OP_LIST = [
'cudnn_lstm', 'elementwise_div', 'elementwise_mul', 'elementwise_pow',
'expand', 'fused_elemwise_activation', 'increment', 'match_matrix_tensor',
'mul', 'pool2d', 'pool3d', 'relu', 'sigmoid', 'softmax', 'sqrt', 'tanh',
'transpose2', 'var_conv_2d'
]
@@ -51,5 +51,5 @@ EMPTY_GRAD_OP_LIST = [
'retinanet_target_assign', 'rpn_target_assign', 'requantize',
'distribute_fpn_proposals', 'auc', 'quantize', 'positive_negative_pair',
'hash', 'less_than', 'not_equal', 'eye', 'chunk_eval', 'is_empty',
'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts'
'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts', 'seed'
]