#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # Clip the shifted values so exp() never underflows to an exact zero,
    # which would turn a downstream log(exp(shiftx)) into -inf.
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)
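
# Illustration of the stability guard: np.exp(1000.) overflows to inf, so a
# naive softmax of [1000., 1000.] would return nan, while stable_softmax
# shifts by the max first and returns the expected [0.5, 0.5].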


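# The base test computes a NumPy reference with stable_softmax and lets OpTest
# compare it against the operator's forward output and gradient. Subclasses
# override get_x_shape() and init_kernel_type() to cover other input shapes
# and kernel variants (cuDNN, float16).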
class TestSoftmaxOp(OpTest):
    def get_x_shape(self):
        return [10, 10]

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_kernel_type()
        self.shape = self.get_x_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
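        # Apply the reference softmax along the last axis: collapse the
        # leading dimensions into rows, normalize each row, then restore
        # the original shape.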
        out = np.apply_along_axis(stable_softmax, 1,
                                  x.reshape([-1, self.shape[-1]]))
        out = out.reshape(self.shape)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
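        # The cuDNN variant is checked on the GPU with a tight absolute
        # tolerance; otherwise the plain kernel uses OpTest's default check.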
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

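    # The gradient check compares the operator's analytic gradient against a
    # numeric estimate, allowing up to 1% relative error.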
    def test_check_grad(self):
        if self.use_cudnn or self.dtype == np.float16:
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place, ["X"], "Out", max_relative_error=0.01)
        else:
            self.check_grad(["X"], "Out", max_relative_error=0.01)


class TestSoftmaxOp2(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
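# float16 carries only about three decimal digits of precision, so the FP16
# tests below loosen the output tolerance to atol=1e-3 and run only when the
# device reports half-precision support.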
class TestSoftmaxFP16Op(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    # FIXME: the gradient check fails when x_shape is [10, 10].
    def test_check_grad(self):
        pass


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


if __name__ == "__main__":
    unittest.main()