From 538c8489581b0d293b11ae09d2f2d136d0df8669 Mon Sep 17 00:00:00 2001 From: Zhang Ting <709968123@qq.com> Date: Fri, 20 Dec 2019 10:51:10 +0800 Subject: [PATCH] add decorator skip_check_grad_ci (#21836) --- .../paddle/fluid/tests/unittests/op_test.py | 26 +++++++++++++++- .../fluid/tests/unittests/test_dropout_op.py | 8 ++++- ..._executor_return_tensor_not_overwriting.py | 3 +- .../unittests/test_fused_emb_seq_pool_op.py | 4 ++- .../tests/unittests/test_lookup_table_op.py | 20 ++++++------ .../unittests/test_lookup_table_v2_op.py | 20 ++++++------ .../fluid/tests/unittests/test_seq_pool.py | 6 ++-- .../white_list/op_check_grad_white_list.py | 31 ------------------- 8 files changed, 59 insertions(+), 59 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 666824a87d..cc2a54b95e 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -146,6 +146,30 @@ def get_numeric_gradient(place, return gradient_flat.reshape(tensor_to_check.shape()) +def skip_check_grad_ci(reason=None): + """Decorator to skip check_grad CI. + + Check_grad is required for Op test cases. However, there are some special + cases that do not need to do check_grad. This decorator is used to skip the + check_grad of the above cases. + + Note: the execution of unit test will not be skipped. It just avoids check_grad + checking in tearDownClass method by setting a `no_need_check_grad` flag. 
+ + Example: + @skip_check_grad_ci(reason="For inference, check_grad is not required.") + class TestInference(OpTest): + """ + if not isinstance(reason, str): + raise AssertionError("The reason for skipping check_grad is required.") + + def wrapper(cls): + cls.no_need_check_grad = True + return cls + + return wrapper + + class OpTest(unittest.TestCase): @classmethod def setUpClass(cls): @@ -182,7 +206,7 @@ class OpTest(unittest.TestCase): + OpTest.op_type + " Op.") # cases and ops do no need check_grad - if cls.__name__ in op_check_grad_white_list.NO_NEED_CHECK_GRAD_CASES \ + if hasattr(cls, "no_need_check_grad") \ or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST: return diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index cf66e703c8..cc3910d1b0 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -17,7 +17,7 @@ from __future__ import print_function import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -61,6 +61,7 @@ class TestDropoutOp3(TestDropoutOp): } +@skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp4(OpTest): def setUp(self): self.op_type = "dropout" @@ -74,6 +75,7 @@ class TestDropoutOp4(OpTest): self.check_output() +@skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp5(OpTest): def setUp(self): self.op_type = "dropout" @@ -119,6 +121,7 @@ class TestDropoutOp7(TestDropoutOp): } +@skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp8(OpTest): def setUp(self): self.op_type = "dropout" @@ -135,6 +138,7 @@ class TestDropoutOp8(OpTest): self.check_output() +@skip_check_grad_ci(reason="For inference, check_grad 
is not required.") class TestDropoutOp9(OpTest): def setUp(self): self.op_type = "dropout" @@ -174,6 +178,7 @@ class TestDropoutOpWithSeed(OpTest): @unittest.skipIf( not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"), "core is not compiled with CUDA or core is not support dropout") +@skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestFP16DropoutOp(OpTest): def setUp(self): self.op_type = "dropout" @@ -201,6 +206,7 @@ class TestFP16DropoutOp(OpTest): @unittest.skipIf( not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"), "core is not compiled with CUDA or core is not support dropout") +@skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestFP16DropoutOp2(TestFP16DropoutOp): def init_test_case(self): self.input_size = [32, 64, 3] diff --git a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py index 3504081e01..a7ee6b31b0 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py +++ b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py @@ -17,9 +17,10 @@ import unittest import numpy as np import paddle.fluid.core as core import paddle.fluid as fluid -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci +@skip_check_grad_ci(reason="Not op test but call the method of class OpTest.") class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): def setUp(self): pass diff --git a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py index 0095d438aa..b7ebfc6b9f 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py @@ -17,7 +17,7 @@ from __future__ import print_function import unittest import 
platform import numpy as np -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid.op import Operator @@ -25,6 +25,8 @@ import paddle.compat as cpt import paddle.version as ver +@skip_check_grad_ci(reason="check_grad is called when ver.mkl() == ON " + "and 'Linux' in platform.platform().") class TestFusedEmbeddingSeqPoolOp(OpTest): def setUp(self): self.op_type = "fused_embedding_seq_pool" diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index 0c292170fc..0ddcdfd8e1 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.compat as cpt @@ -56,6 +56,10 @@ class TestLookupTableOpWithTensorIds(OpTest): self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) +@skip_check_grad_ci( + reason="Since paddings are not trainable and fixed in forward, " + "the gradient of paddings makes no sense and we don't " + "test the gradient here.") class TestLookupTableOpWithPadding(TestLookupTableOp): def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) @@ -64,12 +68,11 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): self.attrs = {'padding_idx': int(padding_idx)} self.check_output() - def test_check_grad(self): - # Since paddings are not trainable and fixed in forward, the gradient of - # paddings makes no sense and we don't test the gradient here. 
- pass - +@skip_check_grad_ci( + reason="Since paddings are not trainable and fixed in forward, " + "the gradient of paddings makes no sense and we don't " + "test the gradient here.") class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): def test_check_output(self): ids = self.inputs['Ids'] @@ -79,11 +82,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): self.attrs = {'padding_idx': cpt.long_type(padding_idx)} self.check_output() - def test_check_grad(self): - # Since paddings are not trainable and fixed in forward, the gradient of - # paddings makes no sense and we don't test the gradient here. - pass - class TestLookupTableWIsSelectedRows(unittest.TestCase): def prepare_ids(self, scope, place): diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py index 0fcd6d0afe..9c026f0482 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid.op import Operator @@ -55,6 +55,10 @@ class TestLookupTableOpWithTensorIds(OpTest): self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) +@skip_check_grad_ci( + reason="Since paddings are not trainable and fixed in forward, " + "the gradient of paddings makes no sense and we don't " + "test the gradient here.") class TestLookupTableOpWithPadding(TestLookupTableOp): def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) @@ -63,12 +67,11 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): self.attrs = {'padding_idx': int(padding_idx)} self.check_output() - def test_check_grad(self): - # Since paddings are not trainable and fixed in forward, the 
gradient of - # paddings makes no sense and we don't test the gradient here. - pass - +@skip_check_grad_ci( + reason="Since paddings are not trainable and fixed in forward, " + "the gradient of paddings makes no sense and we don't " + "test the gradient here.") class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): def test_check_output(self): ids = self.inputs['Ids'] @@ -78,11 +81,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): self.attrs = {'padding_idx': cpt.long_type(padding_idx)} self.check_output() - def test_check_grad(self): - # Since paddings are not trainable and fixed in forward, the gradient of - # paddings makes no sense and we don't test the gradient here. - pass - class TestLookupTableWIsSelectedRows(unittest.TestCase): def prepare_ids(self, scope, place): diff --git a/python/paddle/fluid/tests/unittests/test_seq_pool.py b/python/paddle/fluid/tests/unittests/test_seq_pool.py index 7591a1b313..9087b9cc01 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_pool.py +++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci from test_reorder_lod_tensor import convert_to_offset @@ -355,6 +355,8 @@ class TestSeqMaxPool2DLen0LoDLevel2(TestSeqMaxPool2D): return [[1, 0, 2, 2], [0, 3, 0, 10, 0]] +@skip_check_grad_ci(reason="Grad computation does not apply to Sequence MAX " + "Pool executed when is_test is true.") class TestSeqMaxPool2DInference(TestSeqMaxPool2D): def compute(self, x, offset, out): self.attrs = {"pad_value": 1.0, 'pooltype': "MAX", 'is_test': True} @@ -368,7 +370,7 @@ class TestSeqMaxPool2DInference(TestSeqMaxPool2D): out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) def test_check_grad(self): - """Grad computation does not apply to Sequence MAX + """Grad computation does not apply to Sequence MAX Pool executed 
when is_test is true """ return diff --git a/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py index 869235eca9..389c304759 100644 --- a/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/op_check_grad_white_list.py @@ -53,34 +53,3 @@ EMPTY_GRAD_OP_LIST = [ 'hash', 'less_than', 'not_equal', 'eye', 'chunk_eval', 'is_empty', 'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts' ] - -# Special cases do not need to check grad -NO_NEED_CHECK_GRAD_CASES = [ - 'TestLookupTableOpWithPadding', - 'TestLookupTableOpWithTensorIdsAndPadding', - 'TestLookupTableOpWithPadding', - 'TestLookupTableOpWithTensorIdsAndPadding', - 'TestSeqMaxPool2DInference', - 'TestSeqMaxPool2DInferenceLen0', - 'TestSeqMaxPool2DInferenceLen0LoDLevel2', - 'TestDropoutOp4', - 'TestDropoutOp5', - 'TestDropoutOp8', - 'TestDropoutOp9', - 'TestFP16DropoutOp', - 'TestFP16DropoutOp2', - 'TestExpandOpBoolean', - 'TestFusedEmbeddingSeqPoolOp', - 'TestMKLDNNConcatOp', - 'TestMKLDNNConcatOp', - 'TestMKLDNNConcatOp3', - 'TestElementwiseMulMKLDNNOp_Integrated_With_Convs', - 'TestConv2dTransposeMKLDNNOp', - 'TestMKLDNNFuseBias', - 'TestMKLDNNWithPad', - 'TestMKLDNNWithStride', - 'TestMKLDNNWithAsymPad', - 'TestMKLDNNWithSamePad', - 'TestMKLDNNWithValidPad', - 'TestMKLDNNWithValidPad_NHWC', -] -- GitLab