#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
import paddle
import paddle.nn.functional as F

np.random.seed(10)


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # Clip shiftx; otherwise a loss that computes log(exp(shiftx))
    # may underflow exp to 0 and hit log(0) = -inf.
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


def ref_softmax(x, axis=None, dtype=None):
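    """NumPy reference: optionally cast x to `dtype`, then apply
    stable_softmax along `axis` (last axis by default)."""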
    x_t = x.copy()
    if dtype is not None:
        x_t = x_t.astype(dtype)
    if axis is None:
        axis = -1
    return np.apply_along_axis(stable_softmax, axis, x_t)


class TestSoftmaxOp(OpTest):
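    """Checks the softmax op against the NumPy reference; subclasses override
    get_x_shape()/get_axis() for other shapes and axes, and init_kernel_type()
    to switch backend or dtype."""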

    def get_x_shape(self):
        return [10, 10]

    def get_axis(self):
        return -1

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
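        # init_kernel_type() is the hook subclasses override to enable cuDNN
        # or to change the dtype (e.g. float16).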
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn or self.dtype == np.float16:
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place, ["X"],
                    "Out",
                    max_relative_error=0.01,
                    check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_grad(["X"],
                            "Out",
                            max_relative_error=0.01,
                            check_dygraph=(self.use_mkldnn == False))


class TestSoftmaxOp2(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


class TestSoftmaxOp3(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


class TestSoftmaxOp4(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


class TestSoftmaxOp5(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


class TestSoftmaxOp6(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
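    """Same checks as TestSoftmaxOp, but routed through the cuDNN kernel."""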

    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp7(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 0


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 2


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 3


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 4


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
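    """float16 forward check on CUDA; the gradient check is skipped (see the
    FIXME below)."""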

    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    # FIXME: if x_shape is [10, 10], the gradient check fails.
    def test_check_grad(self):
        pass


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):

    def get_x_shape(self):
        return [2, 3, 4, 10]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):

    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxBF16Op(OpTest):

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = self.init_cudnn()
        self.use_mkldnn = False
        self.dtype = np.uint16
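        # bfloat16 tensors are represented as uint16 here; the input and the
        # expected output are converted with convert_float_to_uint16 below.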
        self.shape = [10, 10]
        self.axis = -1

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_cudnn(self):
        return False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place,
                                     check_dygraph=(self.use_mkldnn == False))

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ["X"],
                                   "Out",
                                   numeric_grad_delta=0.05,
                                   check_dygraph=(self.use_mkldnn == False))


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0"
)
class TestSoftmaxBF16CUDNNOp(TestSoftmaxBF16Op):

    def init_cudnn(self):
        return True


class TestSoftmaxAPI(unittest.TestCase):
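    """Checks paddle.nn.functional.softmax and paddle.nn.Softmax in static and
    dygraph modes, including the dtype argument and error handling."""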

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32')
        self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
        self.executed_api()

    def executed_api(self):
        self.softmax = F.softmax

    def test_static_check(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, 'float32')
            out1 = self.softmax(x)
            m = paddle.nn.Softmax()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_check(self):
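        # Switch to imperative (dygraph) mode; static mode is restored at the
        # end of the test via paddle.enable_static().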
        paddle.disable_static(self.place)

        x = paddle.to_tensor(self.x_np)
        out1 = self.softmax(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax()
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.softmax(x, axis=0)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax(axis=0)
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        if core.is_compiled_with_rocm():
            out = self.softmax(x, dtype=np.float32)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
        else:
            out = self.softmax(x, dtype=np.float64)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()

    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.softmax, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[2, 3],
                                        dtype='int32')
            self.assertRaises(TypeError, self.softmax, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[2, 3],
                                       dtype='float16')
            self.softmax(x_fp16)


class TestSoftmaxInplaceAPI(TestSoftmaxAPI):
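    """Reuses the checks above with the in-place variant F.softmax_."""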

    def executed_api(self):
        self.softmax = F.softmax_


if __name__ == "__main__":
    unittest.main()