Unverified commit 548efcd2 authored by Zhang Ting, committed by GitHub

Fix unit tests to avoid check_grad checking failures (#21554)

* fix python API tests that do not need to inherit OpTest, test=develop

* fix fp16 cases that will only be enabled in GPU mode, test=develop

* remove TestSoftmaxFP16Op from test cases of softmax_mkldnn_op, test=develop

* fix tests so that the cases are only created in GPU mode, test=develop
Parent 4ad9b755
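The fixes below follow two recurring patterns: GPU-only cases (fp16 kernels, CUDNN variants) gain a class-level `@unittest.skipIf(not core.is_compiled_with_cuda(), ...)` guard so they are reported as skipped on CPU-only builds instead of failing in `check_grad`, and pure error/API tests now inherit `unittest.TestCase` rather than `OpTest`, so the OpTest gradient-checking machinery never runs on classes that define no operator. A minimal sketch of both patterns, using hypothetical test names rather than classes from this commit:

```python
import unittest

import paddle.fluid.core as core


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestHypotheticalFP16Case(unittest.TestCase):
    # Skipped as a whole on CPU-only builds, so CUDAPlace is only
    # ever constructed when a CUDA build is actually present.
    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.assertIsNotNone(place)


class TestHypotheticalOpError(unittest.TestCase):
    # Error tests inherit unittest.TestCase, not OpTest: they only
    # assert that bad inputs raise, so no check_grad should run.
    def test_errors(self):
        with self.assertRaises(TypeError):
            raise TypeError("input must be Variable or numpy.ndarray")


if __name__ == '__main__':
    unittest.main()
```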
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from paddle.fluid.tests.unittests.op_test import OpTest
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.test_softmax_op import *
+from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
......
@@ -37,7 +37,7 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestAssignOpError(op_test.OpTest):
+class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The type of input must be Variable or numpy.ndarray.
......
@@ -22,6 +22,8 @@ from paddle.fluid import core
 alignment = 256
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestAllocContinuousSpace(OpTest):
     def setUp(self):
         self.op_type = "coalesce_tensor"
@@ -78,13 +80,12 @@ class TestAllocContinuousSpace(OpTest):
         return outputs, coalesce_tensor_var
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            self.check_output_with_place(
-                place=core.CUDAPlace(0),
-                no_check_set=["FusedOutput"],
-                atol=1e-5)
+        self.check_output_with_place(
+            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestAllocContinuousSpace2(TestAllocContinuousSpace):
     def init_attr(self):
         return {
@@ -95,11 +96,8 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace):
         }
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            self.check_output_with_place(
-                place=core.CUDAPlace(0),
-                no_check_set=["FusedOutput"],
-                atol=1e-5)
+        self.check_output_with_place(
+            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5)
 
 
 if __name__ == '__main__':
......
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
 
 
 class TestConcatOp(OpTest):
@@ -134,6 +134,8 @@ create_test_AxisTensor(TestConcatOp5)
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestConcatFp16(parent):
         def get_dtype(self):
             return np.float16
......
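The `create_test_fp16` factories touched in this commit (concat, pad, split) all gain the same guard inside the factory body. Roughly, the pattern looks like the sketch below; the registration via `globals()` mirrors how these files expose the generated class to unittest discovery, though the exact naming shown here is illustrative:

```python
import unittest

import numpy as np
import paddle.fluid.core as core


def create_test_fp16(parent):
    # The guarded subclass is defined inside the factory so each parent
    # test class gets its own fp16 variant, skipped on CPU-only builds.
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestFp16Case(parent):
        def get_dtype(self):
            return np.float16

    # Register under a distinct name so unittest discovery finds it.
    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestFp16Case.__name__ = cls_name
    globals()[cls_name] = TestFp16Case
```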
@@ -697,7 +697,7 @@ class TestCUDNNExhaustiveSearch(TestConv2dOp):
         self.exhaustive_search = True
 
 
-class TestConv2dOpError(OpTest):
+class TestConv2dOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
......
@@ -210,6 +210,8 @@ def create_test_channel_last_class(parent):
 
 def create_test_cudnn_channel_last_class(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestCudnnChannelLastCase(parent):
         def init_kernel_type(self):
             self.use_cudnn = True
@@ -403,11 +405,15 @@ class TestWithDilation(TestConv3dOp):
 #---------------- Conv3dCUDNN ----------------
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCUDNN(TestConv3dOp):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16CUDNN(TestConv3dOp):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -420,11 +426,15 @@ class TestFP16CUDNN(TestConv3dOp):
             self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithGroup1CUDNN(TestWithGroup1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithGroup1CUDNN(TestWithGroup1):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -437,11 +447,15 @@ class TestFP16WithGroup1CUDNN(TestWithGroup1):
             self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithGroup2CUDNN(TestWithGroup2):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithGroup2CUDNN(TestWithGroup2):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -454,11 +468,15 @@ class TestFP16WithGroup2CUDNN(TestWithGroup2):
             self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWith1x1CUDNN(TestWith1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16With1x1CUDNN(TestWith1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -471,11 +489,15 @@ class TestFP16With1x1CUDNN(TestWith1x1):
             self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
......
@@ -359,7 +359,7 @@ create_test_class(TestCrossEntropyOp7RemoveLastDim,
                   "TestCrossEntropyF16Op7RemoveLastDim")
 
 
-class TestCrossEntropyOpError(OpTest):
+class TestCrossEntropyOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
......
@@ -278,7 +278,7 @@ class TestDataNormOpWithSlotDim(OpTest):
         self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
 
 
-class TestDataNormOpWithSyncStats(OpTest):
+class TestDataNormOpWithSyncStats(unittest.TestCase):
     """
     test class for data norm op
     test forward and backward
......
@@ -89,6 +89,8 @@ class TestElementwiseAddOp(OpTest):
         pass
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
     def init_dtype(self):
         self.dtype = np.float16
......
@@ -15,6 +15,7 @@
 from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
@@ -204,6 +205,8 @@ class TestElementwiseDivOp_INT(OpTest):
     pass
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestElementwiseDivOpFp16(ElementwiseDivOp):
     def init_dtype(self):
         self.dtype = np.float16
......
@@ -177,6 +177,8 @@ class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
         self.init_kernel_type()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestElementwiseMulOpFp16(ElementwiseMulOp):
     def init_dtype(self):
         self.dtype = np.float16
......
@@ -90,7 +90,7 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
 
 
 # Test python API
-class TestFillConstantBatchSizeLikeAPI(OpTest):
+class TestFillConstantBatchSizeLikeAPI(unittest.TestCase):
     def test_api(self):
         like = fluid.layers.fill_constant(
             shape=[1, 200], value=10, dtype='int64')
......
@@ -325,6 +325,25 @@ for mode in {0, 1}:
                 'functor_list': ["scale", "elementwise_add"],
                 'save_intermediate_out': save_intermediate_out,
             })
+            create_test_class('add_scale' + suffix, add_scale_func, {
+                'scale': scale,
+                'functor_list': ["elementwise_add", "scale"],
+                'save_intermediate_out': save_intermediate_out,
+            })
+            create_test_class('add_relu' + suffix, add_relu_func, {
+                'functor_list': ["elementwise_add", "relu"],
+                'save_intermediate_out': save_intermediate_out,
+            })
+            create_test_class('relu_add' + suffix, relu_add_func, {
+                'functor_list': ["relu", "elementwise_add"],
+                'save_intermediate_out': save_intermediate_out,
+            })
+            create_test_class('mul_scale' + suffix, mul_scale_func, {
+                'scale': scale,
+                'functor_list': ["elementwise_mul", "scale"],
+                'save_intermediate_out': save_intermediate_out,
+            })
+            if core.is_compiled_with_cuda():
                 create_test_class(
                     'scale_add_fp16' + suffix,
                     scale_add_func, {
@@ -334,11 +353,6 @@ for mode in {0, 1}:
                     },
                     dtype=np.float16,
                     grad_chek=False)
-            create_test_class('add_scale' + suffix, add_scale_func, {
-                'scale': scale,
-                'functor_list': ["elementwise_add", "scale"],
-                'save_intermediate_out': save_intermediate_out,
-            })
                 create_test_class(
                     'add_scale_fp16' + suffix,
                     add_scale_func, {
@@ -348,14 +362,7 @@ for mode in {0, 1}:
                     },
                     dtype=np.float16,
                     grad_chek=False)
-            create_test_class('add_relu' + suffix, add_relu_func, {
-                'functor_list': ["elementwise_add", "relu"],
-                'save_intermediate_out': save_intermediate_out,
-            })
-            create_test_class('relu_add' + suffix, relu_add_func, {
-                'functor_list': ["relu", "elementwise_add"],
-                'save_intermediate_out': save_intermediate_out,
-            })
                 create_test_class(
                     'add_relu_fp16' + suffix,
                     add_relu_func, {
@@ -372,13 +379,8 @@ for mode in {0, 1}:
                     },
                     dtype=np.float16,
                     grad_chek=False)
-            create_test_class('mul_scale' + suffix, mul_scale_func, {
-                'scale': scale,
-                'functor_list': ["elementwise_mul", "scale"],
-                'save_intermediate_out': save_intermediate_out,
-            })
                 create_test_class(
-                    'mul_scale' + suffix,
+                    'mul_scale_fp16' + suffix,
                     mul_scale_func, {
                         'scale': scale,
                         'functor_list': ["elementwise_mul", "scale"],
......
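In `test_fused_elemwise_activation_op.py` the test classes are created dynamically at import time, so a class decorator cannot help; instead, the fp32 `create_test_class` calls are hoisted out of the guarded block and the fp16 calls are wrapped in `if core.is_compiled_with_cuda():`, so they are never even created on CPU-only builds. A simplified stand-in (the real `create_test_class` builds full OpTest subclasses; this one is purely illustrative):

```python
import unittest

import numpy as np
import paddle.fluid.core as core


def create_test_class(name, attrs, dtype=np.float32):
    # Simplified stand-in: build a TestCase subclass dynamically and
    # register it in the module namespace under the given name.
    cls = type(name, (unittest.TestCase,), dict(attrs, dtype=dtype))
    globals()[name] = cls


# fp32 cases are registered unconditionally...
create_test_class('TestAddScale', {})

# ...while fp16 cases are only created on CUDA builds, so a CPU-only
# test run never instantiates (or grad-checks) them at all.
if core.is_compiled_with_cuda():
    create_test_class('TestAddScaleFp16', {}, dtype=np.float16)
```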
@@ -20,6 +20,8 @@ from op_test import OpTest
 import paddle.fluid.core as core
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFusionTransposeFlattenConcationOp(OpTest):
     def setUp(self):
         self.init_test_case()
@@ -48,11 +50,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, 1e-6)
-        else:
-            pass
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place, 1e-6)
 
     def init_test_case(self):
         self.shapes = [(3, 4, 17, 17), (3, 8, 7, 7), (3, 12, 5, 5)]
@@ -61,6 +60,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase1(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 4, 18, 17), (3, 8, 18, 7), (6, 12, 9, 5)]
@@ -69,6 +70,8 @@ class TestCase1(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase2(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)]
@@ -77,6 +80,8 @@ class TestCase2(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 0
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase3(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)]
@@ -85,6 +90,8 @@ class TestCase3(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase4(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 9, 17), (8, 3, 9, 17), (4, 6, 9, 17)]
@@ -93,6 +100,8 @@ class TestCase4(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase5(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 9, 17, 2), (3, 8, 2, 17, 9), (3, 17, 9, 8, 2)]
......
@@ -14,6 +14,7 @@
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
@@ -37,6 +38,8 @@ class TestInf(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16Inf(TestInf):
     def init_dtype(self):
         self.dtype = np.float16
@@ -62,6 +65,8 @@ class TestNAN(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16NAN(TestNAN):
     def init_dtype(self):
         self.dtype = np.float16
@@ -88,6 +93,8 @@ class TestIsfinite(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16Isfinite(TestIsfinite):
     def init_dtype(self):
         self.dtype = np.float16
......
@@ -116,6 +116,8 @@ def lstm_naive(
     return output, pre_h, pre_c
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCUDNNLstmOp(OpTest):
     def setUp(self):
         self.op_type = "cudnn_lstm"
@@ -172,24 +174,18 @@ class TestCUDNNLstmOp(OpTest):
     def test_output_with_place(self):
         # depend on the scope structure
-        if self.has_cuda():
-            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5, check_dygraph=False)
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place, atol=1e-5, check_dygraph=False)
 
     def test_grad_with_place(self):
         # depend on the scope structure
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.check_grad_with_place(
-                place,
-                set(['Input', 'W', 'InitH', 'InitC']),
-                ['Out', 'last_h', 'last_c'],
-                max_relative_error=0.02,
-                check_dygraph=False)
-
-    def has_cuda(self):
-        return core.is_compiled_with_cuda()
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(
+            place,
+            set(['Input', 'W', 'InitH', 'InitC']), ['Out', 'last_h', 'last_c'],
+            max_relative_error=0.02,
+            check_dygraph=False)
 
 
 if __name__ == '__main__':
     unittest.main()
@@ -114,7 +114,7 @@ class Generator(object):
             ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y'))
 
 
-class TestMatmulOpError(OpTest):
+class TestMatmulOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The inputs type of matmul_op must be Variable.
@@ -173,7 +173,7 @@ def api_test(dim_x, dim_y, trans_x, trans_y):
         dim_x, dim_y, trans_x, trans_y))
     shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
                                                   trans_y)
-    globals()[test_name] = type(test_name, (OpTest, ), {
+    globals()[test_name] = type(test_name, (unittest.TestCase, ), {
         'shape_X': shape_x,
         'shape_Y': shape_y,
         'transpose_X': trans_x,
@@ -220,7 +220,7 @@ class TestNCECase1SelectedRows(unittest.TestCase):
         self.assertEqual(rets[0], rets[1])
 
 
-class TestNCE_OpError(OpTest):
+class TestNCE_OpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             input1 = fluid.create_lod_tensor(
......
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
 
 
 class TestPadOp(OpTest):
@@ -75,6 +76,8 @@ class TestCase3(TestPadOp):
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestPadFp16(parent):
         def get_dtype(self):
             return np.float16
......
@@ -36,7 +36,7 @@ class TestSignOp(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestSignOpError(OpTest):
+class TestSignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The input type of sign_op must be Variable or numpy.ndarray.
......
@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
 from test_softmax_op import stable_softmax
@@ -106,6 +107,8 @@ class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp):
         self.dtype = np.float64
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
     def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
......
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
 
 
 class TestSplitOp(OpTest):
@@ -210,6 +210,8 @@ class TestSplitByrefOp(OpTest):
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestSplitFp16(parent):
         def get_dtype(self):
             return np.float16
......
@@ -80,7 +80,7 @@ class TestCase4(TestTransposeOp):
         self.axis = (4, 2, 3, 1, 0, 5)
 
 
-class TestTransposeOpError(OpTest):
+class TestTransposeOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float32')
......