From b1ec1d54cf5d925a79713293aade913b6767fe7d Mon Sep 17 00:00:00 2001
From: juncaipeng <52520497+juncaipeng@users.noreply.github.com>
Date: Thu, 26 Dec 2019 12:11:57 +0800
Subject: [PATCH] Delete check grad for int, collect op for now (#21922)

---
 python/paddle/fluid/tests/unittests/op_test.py             | 4 ++--
 .../fluid/tests/unittests/test_bilinear_interp_op.py       | 3 +--
 .../paddle/fluid/tests/unittests/test_lookup_table_op.py   | 4 ----
 .../paddle/fluid/tests/unittests/test_nearest_interp_op.py | 3 +--
 .../fluid/tests/unittests/test_trilinear_interp_op.py      | 3 +--
 .../tests/unittests/white_list/op_accuracy_white_list.py   | 7 ++-----
 6 files changed, 7 insertions(+), 17 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 01dfb0dabcc..ab63ab18abc 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -202,8 +202,8 @@ class OpTest(unittest.TestCase):
         if not hasattr(cls, "no_need_check_grad") \
             and cls.op_type not in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
             if cls.dtype is None or \
-                (cls.dtype in [np.float16, np.int64, np.int32, np.int16] \
-                and cls.op_type not in op_accuracy_white_list.NO_CHECK_GRAD_OP_LIST \
+                (cls.dtype == np.float16 \
+                and cls.op_type not in op_accuracy_white_list.NO_FP16_CHECK_GRAD_OP_LIST \
                 and not hasattr(cls, "exist_check_grad")):
                 raise AssertionError("This test of %s op needs check_grad." %
                                      cls.op_type)
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
index 393d65cc4b8..f59c8d60782 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -315,7 +315,6 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
         self.align_mode = 1
 
 
-@skip_check_grad_ci(reason="uint8 type only be used in test and inference.")
 class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'bilinear'
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index dc6fa029e4c..0ddcdfd8e10 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -180,7 +180,6 @@ class TestEmbedOpError(unittest.TestCase):
             fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestLookupTableOpInt8(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
@@ -200,7 +199,6 @@ class TestLookupTableOpInt8(OpTest):
         pass
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestLookupTableOpWithTensorIdsInt8(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
@@ -220,7 +218,6 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest):
         pass
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
     def test_check_output(self):
         ids = np.squeeze(self.inputs['Ids'])
@@ -235,7 +232,6 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
         pass
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestLookupTableOpWithTensorIdsAndPaddingInt8(
         TestLookupTableOpWithTensorIdsInt8):
     def test_check_output(self):
diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
index 85c854c95a6..c6e05edfaae 100755
--- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -278,7 +278,6 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
         self.align_corners = True
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'nearest'
diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
index 8cfacabf2d8..e8e025ce6a8 100755
--- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 
@@ -381,7 +381,6 @@ class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8):
         self.align_mode = 1
 
 
-@skip_check_grad_ci(reason="int8 type only be used in test and inference.")
 class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8):
     def init_test_case(self):
         self.interp_method = 'trilinear'
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
index 41512f4954e..ca8821e1431 100644
--- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
@@ -33,9 +33,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'trilinear_interp', 'var_conv_2d', 'warpctc'
 ]
 
-NO_CHECK_GRAD_OP_LIST = [
-    'cudnn_lstm', 'elementwise_div', 'elementwise_mul', 'elementwise_pow',
-    'expand', 'fused_elemwise_activation', 'increment', 'match_matrix_tensor',
-    'mul', 'pool2d', 'pool3d', 'relu', 'sigmoid', 'softmax', 'sqrt', 'tanh',
-    'transpose2', 'var_conv_2d'
+NO_FP16_CHECK_GRAD_OP_LIST = [
+    'fused_elemwise_activation', 'pool2d', 'pool3d', 'softmax'
 ]
-- 
GitLab
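
Note: the sketch below is a minimal, standalone illustration of the condition this
patch narrows in op_test.py, not the real OpTest harness. The helper name
needs_check_grad and its arguments are hypothetical stand-ins for the class
attributes (cls.dtype, cls.op_type, cls.exist_check_grad) inspected in
tearDownClass. After the patch, only a missing dtype or a float16 test outside
the white list is flagged as lacking check_grad; int16/int32/int64 tests no
longer trip the assertion, so the uint8/int8 tests above can drop their
@skip_check_grad_ci decorators.

    import numpy as np

    # The white list as introduced by this patch.
    NO_FP16_CHECK_GRAD_OP_LIST = [
        'fused_elemwise_activation', 'pool2d', 'pool3d', 'softmax'
    ]

    def needs_check_grad(dtype, op_type, exist_check_grad=False):
        """Mirror the patched gating: flag only a missing dtype or a
        non-white-listed fp16 test that never ran check_grad."""
        return dtype is None or \
            (dtype == np.float16
             and op_type not in NO_FP16_CHECK_GRAD_OP_LIST
             and not exist_check_grad)

    assert needs_check_grad(np.float16, 'concat')          # fp16 op still flagged
    assert not needs_check_grad(np.float16, 'softmax')     # white-listed fp16 op
    assert not needs_check_grad(np.int64, 'lookup_table')  # int dtypes now pass

Previously the dtype test was `dtype in [np.float16, np.int64, np.int32, np.int16]`
against the broader NO_CHECK_GRAD_OP_LIST, so integer-typed tests were also
required to call check_grad or be white-listed.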