diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
index 3fd4b8e12d14ec907a1f4758d388825d0f5fb130..769a921b80645011efad4e7e9d46736c400ede5e 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from paddle.fluid.tests.unittests.op_test import OpTest
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.test_softmax_op import *
+from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 4d43747676dfe31c1600cb3adae3bd14fd927b69..63308dc9f5b608c3abe1a51d6df629201f25d804 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -37,7 +37,7 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestAssignOpError(op_test.OpTest):
+class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The type of input must be Variable or numpy.ndarray.
diff --git a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py
index 139d50009dc868b13531dbd50ad9c5f7646baa98..a5b30330448d291128ab27512a858eb4fc33a68e 100644
--- a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py
+++ b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py
@@ -22,6 +22,8 @@ from paddle.fluid import core
 alignment = 256
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestAllocContinuousSpace(OpTest):
     def setUp(self):
         self.op_type = "coalesce_tensor"
@@ -78,13 +80,12 @@ class TestAllocContinuousSpace(OpTest):
         return outputs, coalesce_tensor_var
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            self.check_output_with_place(
-                place=core.CUDAPlace(0),
-                no_check_set=["FusedOutput"],
-                atol=1e-5)
+        self.check_output_with_place(
+            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestAllocContinuousSpace2(TestAllocContinuousSpace):
     def init_attr(self):
         return {
@@ -95,11 +96,8 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace):
         }
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            self.check_output_with_place(
-                place=core.CUDAPlace(0),
-                no_check_set=["FusedOutput"],
-                atol=1e-5)
+        self.check_output_with_place(
+            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py
index 92d127e9a79ce29352c8c8286664f3e0bed5b96f..30334de1de28a9824200e74ddc686bca43967937 100644
--- a/python/paddle/fluid/tests/unittests/test_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
 
 
 class TestConcatOp(OpTest):
@@ -134,6 +134,8 @@ create_test_AxisTensor(TestConcatOp5)
 
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestConcatFp16(parent):
         def get_dtype(self):
             return np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
index 15a00e0d407ef2b2b4a847eff336de7606ef18fc..aee693a54b90f2a4decfb3dd48d6a5af0f135dd0 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -697,7 +697,7 @@ class TestCUDNNExhaustiveSearch(TestConv2dOp):
         self.exhaustive_search = True
 
 
-class TestConv2dOpError(OpTest):
+class TestConv2dOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
index 8946ae09c926dcc93e33e65858b127fc5b5fed38..d83da2ce150bb3359abbe25073a28dfd76cd33e6 100644
--- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
@@ -210,6 +210,8 @@ def create_test_channel_last_class(parent):
 
 
 def create_test_cudnn_channel_last_class(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestCudnnChannelLastCase(parent):
         def init_kernel_type(self):
             self.use_cudnn = True
@@ -403,11 +405,15 @@ class TestWithDilation(TestConv3dOp):
 #---------------- Conv3dCUDNN ----------------
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCUDNN(TestConv3dOp):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16CUDNN(TestConv3dOp):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -420,11 +426,15 @@ class TestFP16CUDNN(TestConv3dOp):
         self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithGroup1CUDNN(TestWithGroup1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithGroup1CUDNN(TestWithGroup1):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -437,11 +447,15 @@ class TestFP16WithGroup1CUDNN(TestWithGroup1):
         self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithGroup2CUDNN(TestWithGroup2):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithGroup2CUDNN(TestWithGroup2):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -454,11 +468,15 @@ class TestFP16WithGroup2CUDNN(TestWithGroup2):
         self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWith1x1CUDNN(TestWith1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16With1x1CUDNN(TestWith1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
@@ -471,11 +489,15 @@ class TestFP16With1x1CUDNN(TestWith1x1):
         self.check_output_with_place(place, atol=2e-2)
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
     def init_kernel_type(self):
         self.use_cudnn = True
diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
index 613f074b4a8a40c1ee595df877a44e89e87ec36b..fc4b47732176fbc93679e68b8fa90ca581fd404e 100644
--- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
@@ -359,7 +359,7 @@ create_test_class(TestCrossEntropyOp7RemoveLastDim,
                   "TestCrossEntropyF16Op7RemoveLastDim")
 
 
-class TestCrossEntropyOpError(OpTest):
+class TestCrossEntropyOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py
index ceaf54fccd8956f982796a6d28f848622d3edf73..2f131853412758aa7e348714163b81bdec8247ca 100644
--- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py
@@ -278,7 +278,7 @@ class TestDataNormOpWithSlotDim(OpTest):
         self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
 
 
-class TestDataNormOpWithSyncStats(OpTest):
+class TestDataNormOpWithSyncStats(unittest.TestCase):
     """
     test class for data norm op
     test forward and backward
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
index 4ea4a1f0272b56efba2144c75f173bbfbac8bbde..284806015974884b6b6a705f58f11afd3009452b 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
@@ -89,6 +89,8 @@ class TestElementwiseAddOp(OpTest):
         pass
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
     def init_dtype(self):
         self.dtype = np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
index 34db81ac693e7e79e583844ba7181a097ceb2c7b..3c012ad00c672f56fb0415ed8a5908e712c52ba8 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -15,6 +15,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
@@ -204,6 +205,8 @@ class TestElementwiseDivOp_INT(OpTest):
         pass
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestElementwiseDivOpFp16(ElementwiseDivOp):
     def init_dtype(self):
         self.dtype = np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
index 2d9dac5da320e26bbf6f7ad2dff88130f113109a..71967a51bdc76f8317008e8878ee598c945686cd 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
@@ -177,6 +177,8 @@ class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
         self.init_kernel_type()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
CUDA") class TestElementwiseMulOpFp16(ElementwiseMulOp): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py index 273d7e070c0ef555485aa3d30063707ee5311f8d..4a19298fecc095356a3a0676b768ae3949d31293 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py @@ -90,7 +90,7 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest): # Test python API -class TestFillConstantBatchSizeLikeAPI(OpTest): +class TestFillConstantBatchSizeLikeAPI(unittest.TestCase): def test_api(self): like = fluid.layers.fill_constant( shape=[1, 200], value=10, dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py index 5141bd47a80deef7e1ee540b3d32439ec37f99b4..d371acc117ed084d9450e9f25eed0cb01a51c360 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py @@ -325,29 +325,11 @@ for mode in {0, 1}: 'functor_list': ["scale", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'scale_add_fp16' + suffix, - scale_add_func, { - 'scale': scale, - 'functor_list': ["scale", "elementwise_add"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('add_scale' + suffix, add_scale_func, { 'scale': scale, 'functor_list': ["elementwise_add", "scale"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'add_scale_fp16' + suffix, - add_scale_func, { - 'scale': scale, - 'functor_list': ["elementwise_add", "scale"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('add_relu' + suffix, add_relu_func, { 'functor_list': ["elementwise_add", "relu"], 'save_intermediate_out': save_intermediate_out, @@ -356,36 +338,56 @@ for mode in {0, 1}: 'functor_list': ["relu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'add_relu_fp16' + suffix, - add_relu_func, { - 'functor_list': ["elementwise_add", "relu"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) - create_test_class( - 'relu_add_fp16' + suffix, - relu_add_func, { - 'functor_list': ["relu", "elementwise_add"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) create_test_class('mul_scale' + suffix, mul_scale_func, { 'scale': scale, 'functor_list': ["elementwise_mul", "scale"], 'save_intermediate_out': save_intermediate_out, }) - create_test_class( - 'mul_scale' + suffix, - mul_scale_func, { - 'scale': scale, - 'functor_list': ["elementwise_mul", "scale"], - 'save_intermediate_out': save_intermediate_out, - }, - dtype=np.float16, - grad_chek=False) + if core.is_compiled_with_cuda(): + create_test_class( + 'scale_add_fp16' + suffix, + scale_add_func, { + 'scale': scale, + 'functor_list': ["scale", "elementwise_add"], + 'save_intermediate_out': save_intermediate_out, + }, + dtype=np.float16, + grad_chek=False) + create_test_class( + 'add_scale_fp16' + suffix, + add_scale_func, { + 'scale': scale, + 'functor_list': ["elementwise_add", "scale"], + 'save_intermediate_out': 
+            },
+            dtype=np.float16,
+            grad_chek=False)
+
+        create_test_class(
+            'add_relu_fp16' + suffix,
+            add_relu_func, {
+                'functor_list': ["elementwise_add", "relu"],
+                'save_intermediate_out': save_intermediate_out,
+            },
+            dtype=np.float16,
+            grad_chek=False)
+        create_test_class(
+            'relu_add_fp16' + suffix,
+            relu_add_func, {
+                'functor_list': ["relu", "elementwise_add"],
+                'save_intermediate_out': save_intermediate_out,
+            },
+            dtype=np.float16,
+            grad_chek=False)
+        create_test_class(
+            'mul_scale_fp16' + suffix,
+            mul_scale_func, {
+                'scale': scale,
+                'functor_list': ["elementwise_mul", "scale"],
+                'save_intermediate_out': save_intermediate_out,
+            },
+            dtype=np.float16,
+            grad_chek=False)
 
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py
index 4aa7f76495abc03646ced1f183731f30d50c4223..9fe1df39d3a5ec84c5cf2390b87543b963d6550d 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py
@@ -20,6 +20,8 @@ from op_test import OpTest
 import paddle.fluid.core as core
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFusionTransposeFlattenConcationOp(OpTest):
     def setUp(self):
         self.init_test_case()
@@ -48,11 +50,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, 1e-6)
-        else:
-            pass
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place, 1e-6)
 
     def init_test_case(self):
         self.shapes = [(3, 4, 17, 17), (3, 8, 7, 7), (3, 12, 5, 5)]
@@ -61,6 +60,8 @@ class TestFusionTransposeFlattenConcationOp(OpTest):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase1(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 4, 18, 17), (3, 8, 18, 7), (6, 12, 9, 5)]
@@ -69,6 +70,8 @@ class TestCase1(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase2(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)]
@@ -77,6 +80,8 @@ class TestCase2(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 0
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase3(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)]
@@ -85,6 +90,8 @@ class TestCase3(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase4(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 9, 17), (8, 3, 9, 17), (4, 6, 9, 17)]
@@ -93,6 +100,8 @@ class TestCase4(TestFusionTransposeFlattenConcationOp):
         self.concat_axis = 1
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCase5(TestFusionTransposeFlattenConcationOp):
     def init_test_case(self):
         self.shapes = [(3, 8, 9, 17, 2), (3, 8, 2, 17, 9), (3, 17, 9, 8, 2)]
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index d96ae15c7288c9a8d585d8d70d2aa8922b8f22b3..22bd3bcf4a1e2a292bfddf01f4bc206b86b9cc0e 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -14,6 +14,7 @@
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
@@ -37,6 +38,8 @@ class TestInf(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16Inf(TestInf):
     def init_dtype(self):
         self.dtype = np.float16
@@ -62,6 +65,8 @@ class TestNAN(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16NAN(TestNAN):
     def init_dtype(self):
         self.dtype = np.float16
@@ -88,6 +93,8 @@ class TestIsfinite(OpTest):
         self.check_output()
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestFP16Isfinite(TestIsfinite):
     def init_dtype(self):
         self.dtype = np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
index 89916f936ef422ab7167e433b48e2367cf95f579..0b7e42e7c8e7896c12c62b6ec496d30b335ebd35 100644
--- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
@@ -116,6 +116,8 @@ def lstm_naive(
     return output, pre_h, pre_c
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestCUDNNLstmOp(OpTest):
     def setUp(self):
         self.op_type = "cudnn_lstm"
@@ -172,23 +174,17 @@ class TestCUDNNLstmOp(OpTest):
 
     def test_output_with_place(self):
         # depend on the scope structure
-        if self.has_cuda():
-            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5, check_dygraph=False)
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place, atol=1e-5, check_dygraph=False)
 
     def test_grad_with_place(self):
         # depend on the scope structure
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.check_grad_with_place(
-                place,
-                set(['Input', 'W', 'InitH', 'InitC']),
-                ['Out', 'last_h', 'last_c'],
-                max_relative_error=0.02,
-                check_dygraph=False)
-
-    def has_cuda(self):
-        return core.is_compiled_with_cuda()
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(
+            place,
+            set(['Input', 'W', 'InitH', 'InitC']), ['Out', 'last_h', 'last_c'],
+            max_relative_error=0.02,
+            check_dygraph=False)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 02a5caf0e30ad17ff76fda760f62e0f502e46d21..f60630b22f364cfa9c070839330589d76ac9e7e6 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -114,7 +114,7 @@ class Generator(object):
             ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y'))
 
 
-class TestMatmulOpError(OpTest):
+class TestMatmulOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The inputs type of matmul_op must be Variable.
@@ -173,7 +173,7 @@ def api_test(dim_x, dim_y, trans_x, trans_y):
         dim_x, dim_y, trans_x, trans_y))
     shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
                                                   trans_y)
-    globals()[test_name] = type(test_name, (OpTest, ), {
+    globals()[test_name] = type(test_name, (unittest.TestCase, ), {
         'shape_X': shape_x,
         'shape_Y': shape_y,
         'transpose_X': trans_x,
diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py
index e950c47fcb0a276ad041eb2c94cd453487b8102e..dd9093badec85e579f407b0587a65eabd18502ce 100644
--- a/python/paddle/fluid/tests/unittests/test_nce.py
+++ b/python/paddle/fluid/tests/unittests/test_nce.py
@@ -220,7 +220,7 @@ class TestNCECase1SelectedRows(unittest.TestCase):
         self.assertEqual(rets[0], rets[1])
 
 
-class TestNCE_OpError(OpTest):
+class TestNCE_OpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             input1 = fluid.create_lod_tensor(
diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py
index e2afa190910b879e3c6c5980ad75bf0174ec599c..f0f65a4d7d850755c30f15154d94faef4fbb030e 100644
--- a/python/paddle/fluid/tests/unittests/test_pad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
 
 
 class TestPadOp(OpTest):
@@ -75,6 +76,8 @@ class TestCase3(TestPadOp):
 
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestPadFp16(parent):
         def get_dtype(self):
             return np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_sign_op.py b/python/paddle/fluid/tests/unittests/test_sign_op.py
index a5412439facdc4ce566586bed9a38081b8c43c10..0ff348d0de679fdce746a6efa9dba6181e801aad 100644
--- a/python/paddle/fluid/tests/unittests/test_sign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sign_op.py
@@ -36,7 +36,7 @@ class TestSignOp(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestSignOpError(OpTest):
+class TestSignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             # The input type of sign_op must be Variable or numpy.ndarray.
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index ae1429719b532d94aadf01c3c69c792d47b8a69c..4db3e4d94642e1cddfbee1654ae230d8853c26aa 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from op_test import OpTest
 from test_softmax_op import stable_softmax
@@ -106,6 +107,8 @@ class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp):
         self.dtype = np.float64
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
 class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
     def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py
index 69615cf9f886b257c31c3ec0cef4f390410cabef..ef86ba7f8891d3d0b4fe466d0a0e1bca9b511f91 100644
--- a/python/paddle/fluid/tests/unittests/test_split_op.py
+++ b/python/paddle/fluid/tests/unittests/test_split_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
 
 
 class TestSplitOp(OpTest):
@@ -210,6 +210,8 @@ class TestSplitByrefOp(OpTest):
 
 
 def create_test_fp16(parent):
+    @unittest.skipIf(not core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
     class TestSplitFp16(parent):
         def get_dtype(self):
             return np.float16
diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py
index 37d153aaad884796a9339692ce76811522ff2419..f3bd4117948ebc64a859fd475e43f305c76e4245 100644
--- a/python/paddle/fluid/tests/unittests/test_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py
@@ -80,7 +80,7 @@ class TestCase4(TestTransposeOp):
         self.axis = (4, 2, 3, 1, 0, 5)
 
 
-class TestTransposeOpError(OpTest):
+class TestTransposeOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float32')
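Every hunk above applies one of two variants of the same fix: CUDA availability checks move out of the test body (where a CPU-only build would silently "pass" an empty test) into an `@unittest.skipIf` decorator, and pure error-checking tests rebase from `OpTest` onto `unittest.TestCase`. The following is a minimal standalone sketch of the decorator pattern; the class and method names are hypothetical and not part of the patch, but the decorator and the unconditional `core.CUDAPlace(0)` construction mirror what the diff does.

    import unittest

    import paddle.fluid.core as core


    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestHypotheticalCudaOnlyCase(unittest.TestCase):
        def test_check_output(self):
            # The decorator guarantees this body only runs on CUDA-enabled
            # builds, so the place can be constructed unconditionally.
            place = core.CUDAPlace(0)
            self.assertIsNotNone(place)


    if __name__ == '__main__':
        # On a CPU-only build the case is reported as "skipped" in the test
        # summary instead of spuriously passing with an empty body.
        unittest.main()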