From b681215aa37fdd0a66d4f23943175721305f1b38 Mon Sep 17 00:00:00 2001
From: lidanqing
Date: Sun, 1 Mar 2020 14:14:47 +0100
Subject: [PATCH] Add @skip_check_grad_ci to UT conv_mkldnn and
 int8_mul_mkldnn (#22760)

---
 .../unittests/mkldnn/test_conv2d_mkldnn_op.py | 24 +++++-------------------
 .../mkldnn/test_mul_int8_mkldnn_op.py         | 14 ++++----------
 .../fluid/tests/unittests/test_conv2d_op.py   |  9 ++++++---
 3 files changed, 15 insertions(+), 32 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
index e070f5fbbe0..6600d1456d7 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2
 
 
@@ -104,6 +104,8 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
         self.outputs['Output'] = output
 
 
+@skip_check_grad_ci(
+    reason="Fusion is for inference only, check_grad is not required.")
 class TestWithbreluFusion(TestConv2dMKLDNNOp):
     def init_test_case(self):
         TestConv2dMKLDNNOp.init_test_case(self)
@@ -111,16 +113,9 @@ class TestWithbreluFusion(TestConv2dMKLDNNOp):
         self.fuse_alpha = 6.0
         self.dsttype = np.float32
 
-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-
 
+@skip_check_grad_ci(
+    reason="Fusion is for inference only, check_grad is not required.")
 class TestWithFuse(TestConv2dMKLDNNOp):
     def init_test_case(self):
         TestConv2dMKLDNNOp.init_test_case(self)
@@ -130,15 +125,6 @@ class TestWithFuse(TestConv2dMKLDNNOp):
         self.fuse_residual_connection = True
         self.input_residual_size = [2, 6, 5, 5]
 
-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-
 
 class TestWithPadWithBias(TestConv2dMKLDNNOp):
     def init_test_case(self):
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
index 9280bf886fb..0c91868d302 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
@@ -17,12 +17,15 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 
 '''
  test case for s8 * s8
 '''
 
+@skip_check_grad_ci(
+    reason="mul_mkldnn_op does not implement grad operator, check_grad is not required."
+)
 class TestMKLDNNMulOpS8S8(OpTest):
     def setUp(self):
         self.op_type = "mul"
@@ -77,15 +80,6 @@ class TestMKLDNNMulOpS8S8(OpTest):
         self.check_output_with_place(
             core.CPUPlace(), atol=0, check_dygraph=False)
 
-    def test_check_grad_normal(self):
-        pass
-
-    def test_check_grad_ingore_x(self):
-        pass
-
-    def test_check_grad_ingore_y(self):
-        pass
-
 
 '''
  test case for s8 * u8
diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
index 29f45b22802..8025a332396 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -355,7 +355,8 @@ class TestConv2dOp(OpTest):
             place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
 
     def test_check_grad(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -366,7 +367,8 @@ class TestConv2dOp(OpTest):
             check_dygraph=(self.use_mkldnn == False))
 
     def test_check_grad_no_filter(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -378,7 +380,8 @@ class TestConv2dOp(OpTest):
             check_dygraph=(self.use_mkldnn == False))
 
     def test_check_grad_no_input(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
--
GitLab
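
Note for reviewers unfamiliar with the decorator: the guard added to the
test_check_grad* methods in test_conv2d_op.py only looks for a
`no_need_check_grad` attribute on the test class, so `skip_check_grad_ci`
can be as small as a class decorator that sets that flag. Below is a
minimal sketch under that assumption; the actual implementation lives in
python/paddle/fluid/tests/unittests/op_test.py and may differ in detail,
and `TestWithFuseDemo` is a hypothetical stand-in class, not part of the
patch.

    def skip_check_grad_ci(reason=None):
        """Mark an OpTest subclass so CI skips its gradient checks.

        A human-readable reason is required so reviewers know why the
        check is skipped (e.g. inference-only fusion ops).
        """
        if not isinstance(reason, str):
            raise AssertionError(
                "The reason for skipping check_grad is required.")

        def wrapper(cls):
            # The test_check_grad* methods patched above look for this flag.
            cls.no_need_check_grad = True
            return cls

        return wrapper


    # Hypothetical usage, mirroring the tests touched by this patch:
    @skip_check_grad_ci(
        reason="Fusion is for inference only, check_grad is not required.")
    class TestWithFuseDemo(object):
        pass


    assert getattr(TestWithFuseDemo, "no_need_check_grad", False)

Centralizing the skip in the base class means each fused or int8 test
needs only the one-line decorator instead of overriding three methods
with `pass`, which is exactly the cleanup this patch performs.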