diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
index f8dd1011af2a58a7f2fba53525ff8cb762c88556..b89b3adce3f15bd9908b73a95ed56acab80788b5 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
@@ -22,9 +22,13 @@ from paddle.fluid.tests.unittests.test_elementwise_mul_op import *
 from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive
 from paddle.fluid.tests.unittests.mkldnn.mkldnn_op_test import __assert_close
 import paddle.fluid as fluid
+from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
 
 
 # For UT coverage, integrate conv2d + elementwise-mul so that nchw16C could be automatically chosen when mkldnn-kernel is enabled
+@skip_check_grad_ci(
+    reason="TODO: this test cannot use the white list to skip check_grad; check_grad needs to be added."
+)
 class TestElementwiseMulMKLDNNOp_Integrated_With_Convs(ElementwiseMulOp):
     def setUp(self):
         self.dtype = np.float32
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index a54c8998ef06cedd3e76d247cc0e2e95eae2f46b..01dfb0dabccbbee19ed092aa27887298c0cc690a 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -198,33 +198,31 @@ class OpTest(unittest.TestCase):
                 "This test do not have op_type in class attrs,"
                 " please set self.__class__.op_type=the_real_op_type manually.")
 
-        if hasattr(
-                get_numeric_gradient, 'check_shape_time'
-        ) and get_numeric_gradient.check_shape_time == 0 and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
+        # Cases in NO_FP64_CHECK_GRAD_CASES and ops in NO_FP64_CHECK_GRAD_OP_LIST should be fixed.
+        if not hasattr(cls, "no_need_check_grad") \
+            and cls.op_type not in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
+            if cls.dtype is None or \
+                (cls.dtype in [np.float16, np.int64, np.int32, np.int16] \
+                and cls.op_type not in op_accuracy_white_list.NO_CHECK_GRAD_OP_LIST \
+                and not hasattr(cls, "exist_check_grad")):
+                raise AssertionError("This test of %s op needs check_grad." %
+                                     cls.op_type)
+
+            if cls.dtype in [np.float32, np.float64] \
+                and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \
+                and not hasattr(cls, 'exist_fp64_check_grad'):
+                raise AssertionError(
+                    "This test of %s op needs check_grad with fp64 precision." %
+                    cls.op_type)
+
+        if hasattr(get_numeric_gradient, 'check_shape_time') \
+            and get_numeric_gradient.check_shape_time == 0 \
+            and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST \
+            and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
             raise AssertionError(
                 "At least one input's shape should be large than or equal to 100 for "
                 + OpTest.op_type + " Op.")
 
-        # cases and ops do no need check_grad
-        if hasattr(cls, "no_need_check_grad") \
-            or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
-            return
-
-        # In order to pass ci, and case in NO_FP64_CHECK_GRAD_CASES and op in
-        # NO_FP64_CHECK_GRAD_OP_LIST should be fixed
-        if cls.op_type in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST:
-            return
-
-        if cls.dtype is None or (cls.dtype in [np.float16, np.int64, np.int32, np.int16] \
-            and not hasattr(cls, "exist_check_grad")):
-            raise AssertionError("This test of %s op needs check_grad." %
-                                 cls.op_type)
-
-        if cls.dtype in [np.float32, np.float64] and \
-            not hasattr(cls, 'exist_fp64_check_grad'):
-            raise AssertionError("This test of %s op needs fp64 check_grad." %
-                                 cls.op_type)
-
     def try_call_once(self, data_type):
         if not self.call_once:
             self.call_once = True
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
index f59c8d607829299a862a16a1ba489e81a11ab766..393d65cc4b8278ba77ed712b81e46ccc65a67dc0 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -315,6 +315,7 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
         self.align_mode = 1
 
 
+@skip_check_grad_ci(reason="uint8 type is only used in test and inference.")
 class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'bilinear'
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index 0ddcdfd8e101ef050a8c2e477e9ebb54d2009f52..dc6fa029e4c65d68ef2b82bfcd59cadd2e406707 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -180,6 +180,7 @@ class TestEmbedOpError(unittest.TestCase):
             fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
 
 
+@skip_check_grad_ci(reason="int8 type is only used in test and inference.")
 class TestLookupTableOpInt8(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
@@ -199,6 +200,7 @@ class TestLookupTableOpInt8(OpTest):
         pass
 
 
+@skip_check_grad_ci(reason="int8 type is only used in test and inference.")
 class TestLookupTableOpWithTensorIdsInt8(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
@@ -218,6 +220,7 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest):
         pass
 
 
+@skip_check_grad_ci(reason="int8 type is only used in test and inference.")
 class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
     def test_check_output(self):
         ids = np.squeeze(self.inputs['Ids'])
@@ -232,6 +235,7 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
         pass
 
 
+@skip_check_grad_ci(reason="int8 type is only used in test and inference.")
 class TestLookupTableOpWithTensorIdsAndPaddingInt8(
         TestLookupTableOpWithTensorIdsInt8):
     def test_check_output(self):
diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
index c6e05edfaaec216627225cd85b472ef5bda4dee7..85c854c95a642b6d78567422e5a20862f4b96096 100755
--- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -278,6 +278,7 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
         self.align_corners = True
 
 
+@skip_check_grad_ci(reason="uint8 type is only used in test and inference.")
 class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'nearest'
diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
index e8e025ce6a8c6642736102297589c1f304aeea27..8cfacabf2d846eaa2098a4b52270f8eeda420f44 100755
--- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -381,6 +381,7 @@ class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8):
         self.align_mode = 1
 
 
+@skip_check_grad_ci(reason="uint8 type is only used in test and inference.")
 class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'trilinear'
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
index e2ef4f52b90c768b023a59e778981364c1399585..41512f4954e2beffa51bffa008a8a6a76c2aaa6e 100644
--- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
@@ -14,40 +14,28 @@
 
 # For op in NO_FP64_CHECK_GRAD_OP_LIST, the op test requires check_grad with fp64 precision
 NO_FP64_CHECK_GRAD_OP_LIST = [
-    'abs', 'acos', 'add_position_encoding', 'affine_grid', 'asin', 'atan',
-    'bilinear_interp', 'bilinear_tensor_product', 'brelu', 'center_loss',
-    'clip', 'concat', 'conv2d', 'conv2d_transpose', 'conv3d',
-    'conv3d_transpose', 'conv_shift', 'cos', 'cos_sim', 'crop', 'crop_tensor',
-    'cross_entropy', 'cross_entropy2', 'cudnn_lstm', 'cvm', 'data_norm',
-    'deformable_conv', 'deformable_conv_v1', 'deformable_psroi_pooling',
-    'depthwise_conv2d', 'depthwise_conv2d_transpose', 'dropout',
-    'elementwise_add', 'elementwise_div', 'elementwise_max', 'elementwise_min',
-    'elementwise_mul', 'elementwise_pow', 'elementwise_sub', 'elu', 'exp',
-    'expand', 'flatten', 'flatten2', 'fused_elemwise_activation',
-    'fused_embedding_seq_pool', 'gather', 'gather_nd', 'gelu', 'grid_sampler',
-    'group_norm', 'hard_shrink', 'hard_sigmoid', 'hard_swish',
-    'hierarchical_sigmoid', 'hinge_loss', 'huber_loss', 'im2sequence',
-    'increment', 'kldiv_loss', 'l1_norm', 'leaky_relu', 'lod_reset', 'log',
-    'log_loss', 'logsigmoid', 'lookup_table', 'lookup_table_v2', 'lrn',
-    'margin_rank_loss', 'match_matrix_tensor', 'matmul',
-    'max_pool2d_with_index', 'max_pool3d_with_index', 'maxout', 'mean', 'minus',
-    'modified_huber_loss', 'mul', 'multiplex', 'nce', 'nearest_interp', 'pad',
-    'pad2d', 'pad_constant_like', 'pixel_shuffle', 'pool2d', 'pool3d', 'pow',
-    'prelu', 'prroi_pool', 'psroi_pool', 'rank_loss', 'reciprocal',
-    'reduce_max', 'reduce_min', 'relu', 'relu6', 'reshape2', 'reverse',
-    'roi_align', 'roi_perspective_transform', 'roi_pool', 'row_conv', 'rsqrt',
-    'scale', 'scatter', 'scatter_nd_add', 'seed', 'selu', 'sequence_concat',
-    'sequence_conv', 'sequence_expand', 'sequence_expand_as', 'sequence_pad',
-    'sequence_pool', 'sequence_reshape', 'sequence_reverse', 'sequence_scatter',
-    'sequence_slice', 'sequence_softmax', 'sequence_topk_avg_pooling',
-    'sequence_unpad', 'shuffle_channel', 'sigmoid',
-    'sigmoid_cross_entropy_with_logits', 'sigmoid_focal_loss', 'sign', 'sin',
-    'slice', 'smooth_l1_loss', 'soft_relu', 'softmax', 'softshrink', 'softsign',
-    'space_to_depth', 'spectral_norm', 'split', 'spp', 'sqrt', 'square',
-    'squared_l2_distance', 'squared_l2_norm', 'squeeze', 'squeeze2', 'stack',
-    'stanh', 'strided_slice', 'swish', 'tanh', 'tanh_shrink',
-    'teacher_student_sigmoid_loss', 'temporal_shift', 'thresholded_relu',
-    'transpose2', 'tree_conv', 'trilinear_interp', 'unfold', 'unpool',
-    'unsqueeze', 'unsqueeze2', 'unstack', 'var_conv_2d', 'warpctc',
-    'yolov3_loss'
+    'affine_grid', 'clip', 'conv2d', 'conv2d_transpose', 'conv3d',
+    'conv3d_transpose', 'conv_shift', 'cos_sim', 'cudnn_lstm', 'cvm',
+    'data_norm', 'deformable_conv', 'deformable_conv_v1',
+    'deformable_psroi_pooling', 'depthwise_conv2d',
+    'depthwise_conv2d_transpose', 'dropout', 'elementwise_max',
+    'fused_elemwise_activation', 'hierarchical_sigmoid', 'hinge_loss',
+    'huber_loss', 'im2sequence', 'increment', 'l1_norm', 'log_loss', 'lrn',
+    'margin_rank_loss', 'match_matrix_tensor', 'matmul',
+    'max_pool2d_with_index', 'max_pool3d_with_index', 'maxout', 'minus',
+    'modified_huber_loss', 'mul', 'nce', 'pad', 'pad2d', 'pad_constant_like',
+    'pool2d', 'pool3d', 'prelu', 'prroi_pool', 'rank_loss', 'reduce_max',
+    'reduce_min', 'relu', 'reshape2', 'roi_perspective_transform', 'row_conv',
+    'scale', 'scatter', 'sequence_conv', 'sequence_pool', 'sequence_reverse',
+    'sequence_slice', 'sequence_topk_avg_pooling', 'shuffle_channel', 'sigmoid',
+    'smooth_l1_loss', 'softmax', 'spectral_norm', 'spp', 'sqrt',
+    'squared_l2_distance', 'squared_l2_norm', 'tanh', 'transpose2',
+    'trilinear_interp', 'var_conv_2d', 'warpctc'
+]
+
+NO_CHECK_GRAD_OP_LIST = [
+    'cudnn_lstm', 'elementwise_div', 'elementwise_mul', 'elementwise_pow',
+    'expand', 'fused_elemwise_activation', 'increment', 'match_matrix_tensor',
+    'mul', 'pool2d', 'pool3d', 'relu', 'sigmoid', 'softmax', 'sqrt', 'tanh',
+    'transpose2', 'var_conv_2d'
 ]
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py
index 389c304759aff4829d41a1d4aa2cd2c84d064a1d..43775950d232a705936ae099b043262c4c25ce3b 100644
--- a/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py
@@ -51,5 +51,5 @@ EMPTY_GRAD_OP_LIST = [
     'retinanet_target_assign', 'rpn_target_assign', 'requantize',
     'distribute_fpn_proposals', 'auc', 'quantize', 'positive_negative_pair',
     'hash', 'less_than', 'not_equal', 'eye', 'chunk_eval', 'is_empty',
-    'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts'
+    'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts', 'seed'
 ]
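
Note: the new `tearDownClass` logic in op_test.py only checks `hasattr(cls, "no_need_check_grad")`, so the `skip_check_grad_ci` decorator these tests import can be a very small class marker. The sketch below is a plausible reconstruction inferred from that check and from the call sites in this diff, not the verbatim definition in op_test.py:

```python
def skip_check_grad_ci(reason=None):
    """Mark an OpTest subclass as intentionally exempt from the mandatory
    check_grad enforcement performed in OpTest.tearDownClass.

    A string ``reason`` is required so that every skipped test documents why
    gradient checking does not apply (for example, int8/uint8 kernels that
    exist only for inference).
    """
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        # tearDownClass only tests for the attribute's presence, so setting
        # it on the class is enough to bypass the check_grad assertions.
        cls.no_need_check_grad = True
        return cls

    return wrapper
```

The `exist_check_grad` / `exist_fp64_check_grad` attributes consulted by the same logic are presumably recorded when a test actually calls `check_grad`; that mechanism is outside this diff.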