From 0155f91679726662b5907599856599fa11d089de Mon Sep 17 00:00:00 2001
From: zhupengyang
Date: Thu, 5 Nov 2020 16:31:56 +0800
Subject: [PATCH] enable softmax unittest (#28362)

---
 .../fluid/tests/unittests/test_softmax_op.py | 25 ++++++++++-----------------
 1 file changed, 8 insertions(+), 17 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 71df2c4acc..71c4e9c495 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -267,22 +267,11 @@ class TestSoftmaxFP16Op(TestSoftmaxOp):
         pass
 
 
-@unittest.skip('disable TestSoftmaxFP16Op2')
-class TestSoftmaxFP16Op2(TestSoftmaxOp):
-    def init_kernel_type(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            if core.is_float16_supported(place):
-                self.check_output_with_place(place, atol=1e-3)
-
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
     def get_x_shape(self):
-        return [2, 3, 4, 5]
-
-    def test_check_grad(self):
-        pass
+        return [2, 3, 4, 10]
 
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
@@ -354,10 +343,12 @@ class TestSoftmaxAPI(unittest.TestCase):
         # The input type must be Variable.
         self.assertRaises(TypeError, F.softmax, 1)
         # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
+        x_int32 = paddle.fluid.data(
+            name='x_int32', shape=[2, 3], dtype='int32')
         self.assertRaises(TypeError, F.softmax, x_int32)
         # support the input dtype is float16
-        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
+        x_fp16 = paddle.fluid.data(
+            name='x_fp16', shape=[2, 3], dtype='float16')
         F.softmax(x_fp16)
-- 
GitLab
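
The change above replaces a permanently skipped TestSoftmaxFP16Op2 (which carried its own copies of test_check_output and test_check_grad) with a skipIf-guarded subclass of TestSoftmaxFP16Op, so the FP16 case re-runs every inherited check on CUDA builds and is skipped cleanly everywhere else. Below is a minimal sketch of that pattern in plain unittest; HAS_GPU and the class names are illustrative stand-ins, not Paddle APIs:

    import unittest

    HAS_GPU = False  # stand-in for core.is_compiled_with_cuda()


    class TestSoftmaxBase(unittest.TestCase):
        def get_x_shape(self):
            return [10, 10]

        def test_shape_rank(self):
            # Inherited by every subclass: the test body runs against
            # whatever shape the subclass's get_x_shape() returns.
            self.assertGreaterEqual(len(self.get_x_shape()), 2)


    @unittest.skipIf(not HAS_GPU, "core is not compiled with CUDA")
    class TestSoftmaxVariant(TestSoftmaxBase):
        # Only the shape is overridden; all inherited test methods
        # re-run, and the whole class is skipped on non-GPU builds.
        def get_x_shape(self):
            return [2, 3, 4, 10]


    if __name__ == '__main__':
        unittest.main()

As in the patch, the base class's tests still run on their own, so the guarded variant adds coverage for a second shape rather than replacing anything.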