#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F

np.random.seed(10)


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # Clip the shifted values at -64; otherwise, when a loss computes
    # log(exp(shiftx)), exp may underflow to 0 and log(0) gives -inf.
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


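# Note on the reference helpers (added for clarity, not used by the tests):
# softmax is shift invariant, so the max-subtraction in stable_softmax does not
# change the result; e.g. stable_softmax(np.array([1., 2., 3.])) is
# approximately [0.09003057, 0.24472847, 0.66524096].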
def ref_softmax(x, axis=None, dtype=None):
    x_t = x.copy()
    if dtype is not None:
        x_t = x_t.astype(dtype)
    if axis is None:
        axis = -1
    return np.apply_along_axis(stable_softmax, axis, x_t)


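# TestSoftmaxOp builds a random input, runs the C++ "softmax" kernel through
# OpTest, and compares the forward output against the numpy reference above;
# test_check_grad additionally checks the gradient of 'Out' w.r.t. 'X' with a
# 1% relative-error budget. Subclasses only override the shape and axis.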
class TestSoftmaxOp(OpTest):

    def get_x_shape(self):
        return [10, 10]

    def get_axis(self):
        return -1

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn or self.dtype == np.float16:
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place, ["X"],
                    "Out",
                    max_relative_error=0.01,
                    check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_grad(["X"],
                            "Out",
                            max_relative_error=0.01,
                            check_dygraph=(self.use_mkldnn == False))


class TestSoftmaxOp2(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


class TestSoftmaxOp3(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


class TestSoftmaxOp4(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


class TestSoftmaxOp5(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


class TestSoftmaxOp6(TestSoftmaxOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


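# The cuDNN variants below repeat the same forward/backward checks with
# use_cudnn=True, covering 4-D and 5-D inputs and every softmax axis.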
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):

    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp7(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 0


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 2


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 3


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 4


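# float16 variants: only the forward pass is verified, on a CUDA place with a
# looser tolerance (atol=1e-3); the gradient check is skipped (see the FIXME
# in TestSoftmaxFP16Op below).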
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):

    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    # FIXME: if the x_shape is [10, 10], the gradient check fails.
    def test_check_grad(self):
        pass


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):

    def get_x_shape(self):
        return [2, 3, 4, 10]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):

    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):

    def get_x_shape(self):
        return [2, 3, 4, 5]


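# bfloat16 note: OpTest has no native bfloat16 dtype, so inputs and expected
# outputs are packed into uint16 via convert_float_to_uint16 (presumably the
# upper 16 bits of the float32 bit pattern), and the numeric gradient uses a
# coarser delta to cope with the reduced precision.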
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxBF16Op(OpTest):

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = self.init_cudnn()
        self.use_mkldnn = False
        self.dtype = np.uint16
        self.shape = [10, 10]
        self.axis = -1

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_cudnn(self):
        return False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place,
                                     check_dygraph=(self.use_mkldnn == False))

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ["X"],
                                   "Out",
                                   numeric_grad_delta=0.05,
                                   check_dygraph=(self.use_mkldnn == False))


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
class TestSoftmaxBF16CUDNNOp(TestSoftmaxBF16Op):

    def init_cudnn(self):
        return True


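# API-level tests: F.softmax and paddle.nn.Softmax are checked against
# ref_softmax in both static-graph and dygraph modes, including the axis and
# dtype arguments and the error cases for unsupported input types.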
class TestSoftmaxAPI(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32')
        self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
        self.executed_api()

    def executed_api(self):
        self.softmax = F.softmax

    def test_static_check(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, 'float32')
            out1 = self.softmax(x)
            m = paddle.nn.Softmax()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_check(self):
        paddle.disable_static(self.place)

        x = paddle.to_tensor(self.x_np)
        out1 = self.softmax(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax()
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.softmax(x, axis=0)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax(axis=0)
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        if core.is_compiled_with_rocm():
            out = self.softmax(x, dtype=np.float32)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
        else:
            out = self.softmax(x, dtype=np.float64)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()

    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.softmax, 1)
            # The input dtype must be float16, float32 or float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[2, 3],
                                        dtype='int32')
            self.assertRaises(TypeError, self.softmax, x_int32)
            # float16 input is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[2, 3],
                                       dtype='float16')
            self.softmax(x_fp16)


class TestSoftmaxInplaceAPI(TestSoftmaxAPI):
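    # Same checks as TestSoftmaxAPI, but exercised through the in-place
    # variant F.softmax_.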

    def executed_api(self):
        self.softmax = F.softmax_


if __name__ == "__main__":
    unittest.main()