Commit 538c8489 authored by Zhang Ting, committed by Tao Luo

add decorator skip_check_grad_ci (#21836)

Parent bf9c5de7
@@ -146,6 +146,30 @@ def get_numeric_gradient(place,
    return gradient_flat.reshape(tensor_to_check.shape())
+def skip_check_grad_ci(reason=None):
+    """Decorator to skip the check_grad check in CI.
+
+    check_grad is required for Op test cases. However, there are some special
+    cases that do not need check_grad. This decorator is used to skip the
+    check_grad check for those cases.
+
+    Note: the execution of the unit test is not skipped. The decorator only
+    sets a `no_need_check_grad` flag so that the check_grad verification in
+    the tearDownClass method is bypassed.
+
+    Example:
+        @skip_check_grad_ci(reason="For inference, check_grad is not required.")
+        class TestInference(OpTest):
+    """
+    if not isinstance(reason, str):
+        raise AssertionError("The reason for skipping check_grad is required.")
+
+    def wrapper(cls):
+        cls.no_need_check_grad = True
+        return cls
+
+    return wrapper
class OpTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
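For reference, a minimal, self-contained sketch of what the new decorator does. The test class below is hypothetical and only illustrates the flag-setting; real usages subclass OpTest:

def skip_check_grad_ci(reason=None):
    # Stand-in copy of the decorator added above, so the sketch runs on its own.
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        cls.no_need_check_grad = True
        return cls

    return wrapper


@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestInferenceOnly:  # hypothetical stand-in for an OpTest subclass
    pass


# The decorator only sets a class flag; the test body still executes in CI.
assert TestInferenceOnly.no_need_check_grad is True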
@@ -182,7 +206,7 @@ class OpTest(unittest.TestCase):
                + OpTest.op_type + " Op.")

        # cases and ops that do not need check_grad
-        if cls.__name__ in op_check_grad_white_list.NO_NEED_CHECK_GRAD_CASES \
+        if hasattr(cls, "no_need_check_grad") \
            or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
            return
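A compact sketch of the gate this hunk installs, with op_check_grad_white_list stubbed out so the snippet is self-contained (the stub entries are taken from EMPTY_GRAD_OP_LIST shown later in this diff; the helper name is illustrative):

class _StubWhiteList:
    # Stand-in for op_check_grad_white_list (illustrative excerpt).
    EMPTY_GRAD_OP_LIST = ["hash", "eye", "is_empty"]


def check_grad_required(cls, op_type, white_list=_StubWhiteList):
    # Mirror of the gate above: skip when the class carries the flag set by
    # @skip_check_grad_ci, or when the op has an empty grad op.
    return not (hasattr(cls, "no_need_check_grad")
                or op_type in white_list.EMPTY_GRAD_OP_LIST)


class Decorated:
    no_need_check_grad = True  # what @skip_check_grad_ci sets


assert check_grad_required(Decorated, "dropout") is False
assert check_grad_required(object, "hash") is False
assert check_grad_required(object, "dropout") is True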
...

@@ -17,7 +17,7 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
@@ -61,6 +61,7 @@ class TestDropoutOp3(TestDropoutOp):
        }

+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp4(OpTest):
    def setUp(self):
        self.op_type = "dropout"
@@ -74,6 +75,7 @@ class TestDropoutOp4(OpTest):
        self.check_output()

+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp5(OpTest):
    def setUp(self):
        self.op_type = "dropout"
@@ -119,6 +121,7 @@ class TestDropoutOp7(TestDropoutOp):
        }

+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp8(OpTest):
    def setUp(self):
        self.op_type = "dropout"
@@ -135,6 +138,7 @@ class TestDropoutOp8(OpTest):
        self.check_output()

+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp9(OpTest):
    def setUp(self):
        self.op_type = "dropout"
@@ -174,6 +178,7 @@ class TestDropoutOpWithSeed(OpTest):
@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or core does not support dropout")
+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp(OpTest):
    def setUp(self):
        self.op_type = "dropout"
@@ -201,6 +206,7 @@ class TestFP16DropoutOp(OpTest):
@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or core does not support dropout")
+@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp2(TestFP16DropoutOp):
    def init_test_case(self):
        self.input_size = [32, 64, 3]
...

@@ -17,9 +17,10 @@ import unittest
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci

+@skip_check_grad_ci(reason="Not an op test, but it calls methods of the OpTest class.")
class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest):
    def setUp(self):
        pass
...

@@ -17,7 +17,7 @@ from __future__ import print_function
import unittest
import platform
import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
@@ -25,6 +25,8 @@ import paddle.compat as cpt
import paddle.version as ver

+@skip_check_grad_ci(reason="check_grad is called when ver.mkl() == ON "
+                    "and 'Linux' in platform.platform().")
class TestFusedEmbeddingSeqPoolOp(OpTest):
    def setUp(self):
        self.op_type = "fused_embedding_seq_pool"
...

@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.compat as cpt
@@ -56,6 +56,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))

+@skip_check_grad_ci(
+    reason="Since paddings are not trainable and fixed in forward, "
+    "the gradient of paddings makes no sense and we don't "
+    "test the gradient here.")
class TestLookupTableOpWithPadding(TestLookupTableOp):
    def test_check_output(self):
        ids = np.squeeze(self.inputs['Ids'])
@@ -64,12 +68,11 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
        self.attrs = {'padding_idx': int(padding_idx)}
        self.check_output()

-    def test_check_grad(self):
-        # Since paddings are not trainable and fixed in forward, the gradient of
-        # paddings makes no sense and we don't test the gradient here.
-        pass

+@skip_check_grad_ci(
+    reason="Since paddings are not trainable and fixed in forward, "
+    "the gradient of paddings makes no sense and we don't "
+    "test the gradient here.")
class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
    def test_check_output(self):
        ids = self.inputs['Ids']
@@ -79,11 +82,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
        self.check_output()

-    def test_check_grad(self):
-        # Since paddings are not trainable and fixed in forward, the gradient of
-        # paddings makes no sense and we don't test the gradient here.
-        pass
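As the reason string explains, the row selected by padding_idx is fixed in the forward pass, so its gradient carries no information. A small numpy sketch of the idea (a simplified lookup, not PaddlePaddle's implementation):

import numpy as np


def lookup(table, ids, padding_idx):
    # Simplified embedding lookup: the padding row always yields zeros,
    # regardless of the table's contents.
    out = table[ids]
    out[ids == padding_idx] = 0.0  # fixed output, independent of `table`
    return out


table = np.random.rand(8, 4).astype("float32")
ids = np.array([1, 3, 0, 3])
out = lookup(table, ids, padding_idx=3)
# d out / d table[3] is identically zero, so there is no meaningful
# gradient to check for the padding rows.
assert np.all(out[ids == 3] == 0.0)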
class TestLookupTableWIsSelectedRows(unittest.TestCase):
    def prepare_ids(self, scope, place):
...

@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
@@ -55,6 +55,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))

+@skip_check_grad_ci(
+    reason="Since paddings are not trainable and fixed in forward, "
+    "the gradient of paddings makes no sense and we don't "
+    "test the gradient here.")
class TestLookupTableOpWithPadding(TestLookupTableOp):
    def test_check_output(self):
        ids = np.squeeze(self.inputs['Ids'])
@@ -63,12 +67,11 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
        self.attrs = {'padding_idx': int(padding_idx)}
        self.check_output()

-    def test_check_grad(self):
-        # Since paddings are not trainable and fixed in forward, the gradient of
-        # paddings makes no sense and we don't test the gradient here.
-        pass

+@skip_check_grad_ci(
+    reason="Since paddings are not trainable and fixed in forward, "
+    "the gradient of paddings makes no sense and we don't "
+    "test the gradient here.")
class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
    def test_check_output(self):
        ids = self.inputs['Ids']
@@ -78,11 +81,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
        self.check_output()

-    def test_check_grad(self):
-        # Since paddings are not trainable and fixed in forward, the gradient of
-        # paddings makes no sense and we don't test the gradient here.
-        pass

class TestLookupTableWIsSelectedRows(unittest.TestCase):
    def prepare_ids(self, scope, place):
...

@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
from test_reorder_lod_tensor import convert_to_offset
@@ -355,6 +355,8 @@ class TestSeqMaxPool2DLen0LoDLevel2(TestSeqMaxPool2D):
        return [[1, 0, 2, 2], [0, 3, 0, 10, 0]]

+@skip_check_grad_ci(reason="Grad computation does not apply to Sequence MAX "
+                    "Pool executed when is_test is true.")
class TestSeqMaxPool2DInference(TestSeqMaxPool2D):
    def compute(self, x, offset, out):
        self.attrs = {"pad_value": 1.0, 'pooltype': "MAX", 'is_test': True}
@@ -368,7 +370,7 @@ class TestSeqMaxPool2DInference(TestSeqMaxPool2D):
            out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11))

    def test_check_grad(self):
        """Grad computation does not apply to Sequence MAX
        Pool executed when is_test is true."""
        return
...

@@ -53,34 +53,3 @@ EMPTY_GRAD_OP_LIST = [
    'hash', 'less_than', 'not_equal', 'eye', 'chunk_eval', 'is_empty',
    'proximal_gd', 'collect_fpn_proposals', 'unique_with_counts'
]
-# Special cases do not need to check grad
-NO_NEED_CHECK_GRAD_CASES = [
-    'TestLookupTableOpWithPadding',
-    'TestLookupTableOpWithTensorIdsAndPadding',
-    'TestLookupTableOpWithPadding',
-    'TestLookupTableOpWithTensorIdsAndPadding',
-    'TestSeqMaxPool2DInference',
-    'TestSeqMaxPool2DInferenceLen0',
-    'TestSeqMaxPool2DInferenceLen0LoDLevel2',
-    'TestDropoutOp4',
-    'TestDropoutOp5',
-    'TestDropoutOp8',
-    'TestDropoutOp9',
-    'TestFP16DropoutOp',
-    'TestFP16DropoutOp2',
-    'TestExpandOpBoolean',
-    'TestFusedEmbeddingSeqPoolOp',
-    'TestMKLDNNConcatOp',
-    'TestMKLDNNConcatOp',
-    'TestMKLDNNConcatOp3',
-    'TestElementwiseMulMKLDNNOp_Integrated_With_Convs',
-    'TestConv2dTransposeMKLDNNOp',
-    'TestMKLDNNFuseBias',
-    'TestMKLDNNWithPad',
-    'TestMKLDNNWithStride',
-    'TestMKLDNNWithAsymPad',
-    'TestMKLDNNWithSamePad',
-    'TestMKLDNNWithValidPad',
-    'TestMKLDNNWithValidPad_NHWC',
-]
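Deleting this central list works because each entry is meant to be expressed locally with @skip_check_grad_ci next to the test it concerns; for example, the TestDropoutOp4 entry is replaced by the hunk shown earlier (excerpted here for illustration):

from op_test import OpTest, skip_check_grad_ci


@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp4(OpTest):
    ...  # test body unchanged; only the decorator is added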