From 548efcd2e4fb06187a7fc18ac74f089fff175ffb Mon Sep 17 00:00:00 2001
From: Zhang Ting <709968123@qq.com>
Date: Mon, 9 Dec 2019 16:27:32 +0800
Subject: [PATCH] Fix unit tests to avoid check_grad checking failures (#21554)

* fix python API tests that do not need to inherit OpTest, test=develop

* fix fp16 cases that will only be enabled in GPU mode, test=develop

* remove TestSoftmaxFP16Op from test cases of softmax_mkldnn_op, test=develop

* fix tests so that the cases are only created in GPU mode, test=develop
---
 .../mkldnn/test_softmax_mkldnn_op.py          |  2 +-
 .../fluid/tests/unittests/test_assign_op.py   |  2 +-
 .../unittests/test_coalesce_tensor_op.py      | 18 ++--
 .../fluid/tests/unittests/test_concat_op.py   |  4 +-
 .../fluid/tests/unittests/test_conv2d_op.py   |  2 +-
 .../fluid/tests/unittests/test_conv3d_op.py   | 22 +++++
 .../tests/unittests/test_cross_entropy_op.py  |  2 +-
 .../tests/unittests/test_data_norm_op.py      |  2 +-
 .../unittests/test_elementwise_add_op.py      |  2 +
 .../unittests/test_elementwise_div_op.py      |  3 +
 .../unittests/test_elementwise_mul_op.py      |  2 +
 .../test_fill_constant_batch_size_like_op.py  |  2 +-
 .../test_fused_elemwise_activation_op.py      | 88 ++++++++++---------
 ...test_fusion_transpose_flatten_concat_op.py | 19 ++--
 .../fluid/tests/unittests/test_isfinite_op.py |  7 ++
 .../tests/unittests/test_lstm_cudnn_op.py     | 24 +++--
 .../fluid/tests/unittests/test_matmul_op.py   |  4 +-
 .../paddle/fluid/tests/unittests/test_nce.py  |  2 +-
 .../fluid/tests/unittests/test_pad_op.py      |  3 +
 .../fluid/tests/unittests/test_sign_op.py     |  2 +-
 .../test_softmax_with_cross_entropy_op.py     |  3 +
 .../fluid/tests/unittests/test_split_op.py    |  4 +-
 .../tests/unittests/test_transpose_op.py      |  2 +-
 23 files changed, 136 insertions(+), 85 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
index 3fd4b8e12d..769a921b80 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from paddle.fluid.tests.unittests.op_test import OpTest
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.test_softmax_op import *
+from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
 
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 4d43747676..63308dc9f5 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -37,7 +37,7 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestAssignOpError(op_test.OpTest):
+class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The type of input must be Variable or numpy.ndarray.
diff --git a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py index 139d50009d..a5b3033044 100644 --- a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py @@ -22,6 +22,8 @@ from paddle.fluid import core alignment = 256 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestAllocContinuousSpace(OpTest): def setUp(self): self.op_type = "coalesce_tensor" @@ -78,13 +80,12 @@ class TestAllocContinuousSpace(OpTest): return outputs, coalesce_tensor_var def test_check_output(self): - if core.is_compiled_with_cuda(): - self.check_output_with_place( - place=core.CUDAPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestAllocContinuousSpace2(TestAllocContinuousSpace): def init_attr(self): return { @@ -95,11 +96,8 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace): } def test_check_output(self): - if core.is_compiled_with_cuda(): - self.check_output_with_place( - place=core.CUDAPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 92d127e9a7..30334de1de 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -18,7 +18,7 @@ import unittest import numpy as np from op_test import OpTest import paddle.fluid as fluid -from paddle.fluid import compiler, Program, program_guard +from paddle.fluid import compiler, Program, program_guard, core class TestConcatOp(OpTest): @@ -134,6 +134,8 @@ create_test_AxisTensor(TestConcatOp5) def create_test_fp16(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestConcatFp16(parent): def get_dtype(self): return np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 15a00e0d40..aee693a54b 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -697,7 +697,7 @@ class TestCUDNNExhaustiveSearch(TestConv2dOp): self.exhaustive_search = True -class TestConv2dOpError(OpTest): +class TestConv2dOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index 8946ae09c9..d83da2ce15 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -210,6 +210,8 @@ def create_test_channel_last_class(parent): def create_test_cudnn_channel_last_class(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCudnnChannelLastCase(parent): def init_kernel_type(self): self.use_cudnn = True @@ -403,11 +405,15 @@ class TestWithDilation(TestConv3dOp): #---------------- Conv3dCUDNN ---------------- +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class 
TestCUDNN(TestConv3dOp): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16CUDNN(TestConv3dOp): def init_kernel_type(self): self.use_cudnn = True @@ -420,11 +426,15 @@ class TestFP16CUDNN(TestConv3dOp): self.check_output_with_place(place, atol=2e-2) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestWithGroup1CUDNN(TestWithGroup1): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16WithGroup1CUDNN(TestWithGroup1): def init_kernel_type(self): self.use_cudnn = True @@ -437,11 +447,15 @@ class TestFP16WithGroup1CUDNN(TestWithGroup1): self.check_output_with_place(place, atol=2e-2) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestWithGroup2CUDNN(TestWithGroup2): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16WithGroup2CUDNN(TestWithGroup2): def init_kernel_type(self): self.use_cudnn = True @@ -454,11 +468,15 @@ class TestFP16WithGroup2CUDNN(TestWithGroup2): self.check_output_with_place(place, atol=2e-2) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestWith1x1CUDNN(TestWith1x1): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16With1x1CUDNN(TestWith1x1): def init_kernel_type(self): self.use_cudnn = True @@ -471,11 +489,15 @@ class TestFP16With1x1CUDNN(TestWith1x1): self.check_output_with_place(place, atol=2e-2) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): def init_kernel_type(self): self.use_cudnn = True diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index 613f074b4a..fc4b477321 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -359,7 +359,7 @@ create_test_class(TestCrossEntropyOp7RemoveLastDim, "TestCrossEntropyF16Op7RemoveLastDim") -class TestCrossEntropyOpError(OpTest): +class TestCrossEntropyOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index ceaf54fccd..2f13185341 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -278,7 +278,7 @@ class TestDataNormOpWithSlotDim(OpTest): self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False) -class TestDataNormOpWithSyncStats(OpTest): +class TestDataNormOpWithSyncStats(unittest.TestCase): """ test class for data norm op test forward and backward diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index 4ea4a1f027..2848060159 100644 --- 
a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -89,6 +89,8 @@ class TestElementwiseAddOp(OpTest): pass +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16ElementwiseAddOp(TestElementwiseAddOp): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index 34db81ac69..3c012ad00c 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest @@ -204,6 +205,8 @@ class TestElementwiseDivOp_INT(OpTest): pass +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestElementwiseDivOpFp16(ElementwiseDivOp): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index 2d9dac5da3..71967a51bd 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -177,6 +177,8 @@ class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): self.init_kernel_type() +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestElementwiseMulOpFp16(ElementwiseMulOp): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py index 273d7e070c..4a19298fec 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py @@ -90,7 +90,7 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest): # Test python API -class TestFillConstantBatchSizeLikeAPI(OpTest): +class TestFillConstantBatchSizeLikeAPI(unittest.TestCase): def test_api(self): like = fluid.layers.fill_constant( shape=[1, 200], value=10, dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py index 5141bd47a8..d371acc117 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py @@ -325,29 +325,11 @@ for mode in {0, 1}: 'functor_list': ["scale", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'scale_add_fp16' + suffix, - scale_add_func, { - 'scale': scale, - 'functor_list': ["scale", "elementwise_add"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('add_scale' + suffix, add_scale_func, { 'scale': scale, 'functor_list': ["elementwise_add", "scale"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'add_scale_fp16' + suffix, - add_scale_func, { - 'scale': scale, - 'functor_list': ["elementwise_add", "scale"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('add_relu' + suffix, add_relu_func, { 
'functor_list': ["elementwise_add", "relu"], 'save_intermediate_out': save_intermediate_out, @@ -356,36 +338,56 @@ for mode in {0, 1}: 'functor_list': ["relu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'add_relu_fp16' + suffix, - add_relu_func, { - 'functor_list': ["elementwise_add", "relu"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) - create_test_class( - 'relu_add_fp16' + suffix, - relu_add_func, { - 'functor_list': ["relu", "elementwise_add"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('mul_scale' + suffix, mul_scale_func, { 'scale': scale, 'functor_list': ["elementwise_mul", "scale"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'mul_scale' + suffix, - mul_scale_func, { - 'scale': scale, - 'functor_list': ["elementwise_mul", "scale"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) + if core.is_compiled_with_cuda(): + create_test_class( + 'scale_add_fp16' + suffix, + scale_add_func, { + 'scale': scale, + 'functor_list': ["scale", "elementwise_add"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) + create_test_class( + 'add_scale_fp16' + suffix, + add_scale_func, { + 'scale': scale, + 'functor_list': ["elementwise_add", "scale"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) + + create_test_class( + 'add_relu_fp16' + suffix, + add_relu_func, { + 'functor_list': ["elementwise_add", "relu"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) + create_test_class( + 'relu_add_fp16' + suffix, + relu_add_func, { + 'functor_list': ["relu", "elementwise_add"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) + create_test_class( + 'mul_scale_fp16' + suffix, + mul_scale_func, { + 'scale': scale, + 'functor_list': ["elementwise_mul", "scale"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py index 4aa7f76495..9fe1df39d3 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py @@ -20,6 +20,8 @@ from op_test import OpTest import paddle.fluid.core as core +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFusionTransposeFlattenConcationOp(OpTest): def setUp(self): self.init_test_case() @@ -48,11 +50,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest): self.outputs = {'Out': out} def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - self.check_output_with_place(place, 1e-6) - else: - pass + place = core.CUDAPlace(0) + self.check_output_with_place(place, 1e-6) def init_test_case(self): self.shapes = [(3, 4, 17, 17), (3, 8, 7, 7), (3, 12, 5, 5)] @@ -61,6 +60,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest): self.concat_axis = 1 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCase1(TestFusionTransposeFlattenConcationOp): def init_test_case(self): self.shapes = 
[(3, 4, 18, 17), (3, 8, 18, 7), (6, 12, 9, 5)] @@ -69,6 +70,8 @@ class TestCase1(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCase2(TestFusionTransposeFlattenConcationOp): def init_test_case(self): self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)] @@ -77,6 +80,8 @@ class TestCase2(TestFusionTransposeFlattenConcationOp): self.concat_axis = 0 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCase3(TestFusionTransposeFlattenConcationOp): def init_test_case(self): self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)] @@ -85,6 +90,8 @@ class TestCase3(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCase4(TestFusionTransposeFlattenConcationOp): def init_test_case(self): self.shapes = [(3, 8, 9, 17), (8, 3, 9, 17), (4, 6, 9, 17)] @@ -93,6 +100,8 @@ class TestCase4(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCase5(TestFusionTransposeFlattenConcationOp): def init_test_case(self): self.shapes = [(3, 8, 9, 17, 2), (3, 8, 2, 17, 9), (3, 17, 9, 8, 2)] diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py index d96ae15c72..22bd3bcf4a 100644 --- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py +++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest @@ -37,6 +38,8 @@ class TestInf(OpTest): self.check_output() +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16Inf(TestInf): def init_dtype(self): self.dtype = np.float16 @@ -62,6 +65,8 @@ class TestNAN(OpTest): self.check_output() +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16NAN(TestNAN): def init_dtype(self): self.dtype = np.float16 @@ -88,6 +93,8 @@ class TestIsfinite(OpTest): self.check_output() +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16Isfinite(TestIsfinite): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py index 89916f936e..0b7e42e7c8 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py @@ -116,6 +116,8 @@ def lstm_naive( return output, pre_h, pre_c +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNNLstmOp(OpTest): def setUp(self): self.op_type = "cudnn_lstm" @@ -172,23 +174,17 @@ class TestCUDNNLstmOp(OpTest): def test_output_with_place(self): # depend on the scope structure - if self.has_cuda(): - place = core.CUDAPlace(0) - self.check_output_with_place(place, atol=1e-5, check_dygraph=False) + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5, check_dygraph=False) def test_grad_with_place(self): # depend on the scope structure - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - self.check_grad_with_place( - place, - set(['Input', 'W', 'InitH', 'InitC']), - ['Out', 'last_h', 'last_c'], - 
max_relative_error=0.02, - check_dygraph=False) - - def has_cuda(self): - return core.is_compiled_with_cuda() + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['Input', 'W', 'InitH', 'InitC']), ['Out', 'last_h', 'last_c'], + max_relative_error=0.02, + check_dygraph=False) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index 02a5caf0e3..f60630b22f 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -114,7 +114,7 @@ class Generator(object): ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y')) -class TestMatmulOpError(OpTest): +class TestMatmulOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The inputs type of matmul_op must be Variable. @@ -173,7 +173,7 @@ def api_test(dim_x, dim_y, trans_x, trans_y): dim_x, dim_y, trans_x, trans_y)) shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x, trans_y) - globals()[test_name] = type(test_name, (OpTest, ), { + globals()[test_name] = type(test_name, (unittest.TestCase, ), { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': trans_x, diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py index e950c47fcb..dd9093bade 100644 --- a/python/paddle/fluid/tests/unittests/test_nce.py +++ b/python/paddle/fluid/tests/unittests/test_nce.py @@ -220,7 +220,7 @@ class TestNCECase1SelectedRows(unittest.TestCase): self.assertEqual(rets[0], rets[1]) -class TestNCE_OpError(OpTest): +class TestNCE_OpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): input1 = fluid.create_lod_tensor( diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py index e2afa19091..f0f65a4d7d 100644 --- a/python/paddle/fluid/tests/unittests/test_pad_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad_op.py @@ -17,6 +17,7 @@ from __future__ import print_function import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core class TestPadOp(OpTest): @@ -75,6 +76,8 @@ class TestCase3(TestPadOp): def create_test_fp16(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestPadFp16(parent): def get_dtype(self): return np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_sign_op.py b/python/paddle/fluid/tests/unittests/test_sign_op.py index a5412439fa..0ff348d0de 100644 --- a/python/paddle/fluid/tests/unittests/test_sign_op.py +++ b/python/paddle/fluid/tests/unittests/test_sign_op.py @@ -36,7 +36,7 @@ class TestSignOp(OpTest): self.check_grad(['X'], 'Out') -class TestSignOpError(OpTest): +class TestSignOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of sign_op must be Variable or numpy.ndarray. 
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index ae1429719b..4db3e4d946 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -16,6 +16,7 @@ from __future__ import print_function import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest from test_softmax_op import stable_softmax @@ -106,6 +107,8 @@ class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp): self.dtype = np.float64 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): def initParams(self): self.op_type = "softmax_with_cross_entropy" diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 69615cf9f8..ef86ba7f88 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -18,7 +18,7 @@ import unittest import numpy as np from op_test import OpTest import paddle.fluid as fluid -from paddle.fluid import compiler, Program, program_guard +from paddle.fluid import compiler, Program, program_guard, core class TestSplitOp(OpTest): @@ -210,6 +210,8 @@ class TestSplitByrefOp(OpTest): def create_test_fp16(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSplitFp16(parent): def get_dtype(self): return np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index 37d153aaad..f3bd411794 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -80,7 +80,7 @@ class TestCase4(TestTransposeOp): self.axis = (4, 2, 3, 1, 0, 5) -class TestTransposeOpError(OpTest): +class TestTransposeOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float32') -- GitLab
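The recurring pattern in this patch is to gate CUDA-only test classes (fp16 cases, CUDNN cases, GPU-only fused ops) with a class-level unittest.skipIf decorator, or to create them only under core.is_compiled_with_cuda(), instead of silently returning from each test method; that way OpTest's check_output/check_grad machinery is never invoked on a CUDAPlace that cannot exist. Tests that only exercise the Python API or error paths are switched from OpTest to plain unittest.TestCase, since they never call check_output/check_grad at all. Below is a minimal self-contained sketch of the skip pattern under the assumption of a Paddle 1.x install; the class names and the trivial test body are illustrative only and are not taken from the patch.

import unittest

import numpy as np
import paddle.fluid.core as core


class TestFloat32Case(unittest.TestCase):
    # Stand-in for an OpTest subclass that runs on both CPU and GPU builds.
    def init_dtype(self):
        self.dtype = np.float32

    def test_dtype(self):
        self.init_dtype()
        self.assertIn(self.dtype, (np.float32, np.float16))


# Skipping the whole class when CUDA is unavailable means none of its test
# methods run, so a real OpTest subclass would never reach a check_grad call
# on a CUDAPlace that cannot be created.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFp16Case(TestFloat32Case):
    def init_dtype(self):
        self.dtype = np.float16


if __name__ == '__main__':
    unittest.main()

On a CPU-only build, unittest reports TestFp16Case as skipped with the given reason rather than failing inside the gradient check, which is the behavior the commit message describes for the fp16 cases.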