Unverified commit b681215a authored by lidanqing, committed by GitHub

Add @skip_check_grad_ci to UT conv_mkldnn and int8_mul_mkldnn (#22760)

Parent 68a92e46
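
This commit replaces empty test_check_grad* overrides in the MKL-DNN unit tests with a declarative @skip_check_grad_ci decorator, and teaches TestConv2dOp to honor the flag the decorator sets. For context, below is a minimal sketch of what such a decorator can look like; it is consistent with the hasattr(self, "no_need_check_grad") guards added in this diff, but the mandatory-reason check is an assumption, not necessarily the exact code in op_test.py:

def skip_check_grad_ci(reason=None):
    """Class decorator: mark an OpTest subclass so the gradient-check
    CI tooling skips it. `reason` records why the skip is legitimate."""
    # Assumption: a string reason is required so every skip stays auditable.
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        # This flag is exactly what the TestConv2dOp.test_check_grad*
        # methods in this diff query via hasattr(self, "no_need_check_grad").
        cls.no_need_check_grad = True
        return cls

    return wrapper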
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2
@@ -104,6 +104,8 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
         self.outputs['Output'] = output
+@skip_check_grad_ci(
+    reason="Fusion is for inference only, check_grad is not required.")
 class TestWithbreluFusion(TestConv2dMKLDNNOp):
     def init_test_case(self):
         TestConv2dMKLDNNOp.init_test_case(self)
@@ -111,16 +113,9 @@ class TestWithbreluFusion(TestConv2dMKLDNNOp):
         self.fuse_alpha = 6.0
         self.dsttype = np.float32
-    def test_check_grad(self):
-        pass
-    def test_check_grad_no_filter(self):
-        pass
-    def test_check_grad_no_input(self):
-        pass
+@skip_check_grad_ci(
+    reason="Fusion is for inference only, check_grad is not required.")
 class TestWithFuse(TestConv2dMKLDNNOp):
     def init_test_case(self):
         TestConv2dMKLDNNOp.init_test_case(self)
@@ -130,15 +125,6 @@ class TestWithFuse(TestConv2dMKLDNNOp):
         self.fuse_residual_connection = True
         self.input_residual_size = [2, 6, 5, 5]
-    def test_check_grad(self):
-        pass
-    def test_check_grad_no_filter(self):
-        pass
-    def test_check_grad_no_input(self):
-        pass
 class TestWithPadWithBias(TestConv2dMKLDNNOp):
     def init_test_case(self):
......
@@ -17,12 +17,15 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 '''
  test case for s8 * s8
 '''
+@skip_check_grad_ci(
+    reason="mul_mkldnn_op does not implement grad operator, check_grad is not required."
+)
 class TestMKLDNNMulOpS8S8(OpTest):
     def setUp(self):
         self.op_type = "mul"
@@ -77,15 +80,6 @@ class TestMKLDNNMulOpS8S8(OpTest):
         self.check_output_with_place(
             core.CPUPlace(), atol=0, check_dygraph=False)
-    def test_check_grad_normal(self):
-        pass
-    def test_check_grad_ingore_x(self):
-        pass
-    def test_check_grad_ingore_y(self):
-        pass
 '''
  test case for s8 * u8
......
@@ -355,7 +355,8 @@ class TestConv2dOp(OpTest):
             place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
     def test_check_grad(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -366,7 +367,8 @@ class TestConv2dOp(OpTest):
             check_dygraph=(self.use_mkldnn == False))
     def test_check_grad_no_filter(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -378,7 +380,8 @@ class TestConv2dOp(OpTest):
             check_dygraph=(self.use_mkldnn == False))
     def test_check_grad_no_input(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
+                                        self.no_need_check_grad == True):
             return
         place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
......
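
Taken together, the decorator and the new base-class guards interact as in the following hypothetical usage (the class name TestSomeFusedConv is invented for illustration):

@skip_check_grad_ci(
    reason="Fusion is for inference only, check_grad is not required.")
class TestSomeFusedConv(TestConv2dMKLDNNOp):
    pass

# TestSomeFusedConv still inherits test_check_grad, test_check_grad_no_filter
# and test_check_grad_no_input from TestConv2dOp, but because the decorator
# set no_need_check_grad = True on the class, each of them returns early.

This is why the empty "def test_check_grad...: pass" stubs could be deleted from the fused-op tests above.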