#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
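    # Shift by the max (clipped at -64) so the exponent arguments stay
    # non-positive and np.exp cannot overflow.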
    shiftx = x - np.max(x).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


class TestSoftmaxOp(OpTest):
    def get_x_shape(self):
        return [10, 10]

    def get_axis(self):
        return -1

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
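        # Reference output: apply the numerically stable softmax along the
        # chosen axis with NumPy.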
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
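        # The cuDNN kernel is verified on a CUDA place; otherwise the
        # standard output check is used.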
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
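        # cuDNN and FP16 kernels are checked on a CUDA place (and only when
        # FP16 is supported there); everything else uses the regular check.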
        if self.use_cudnn or self.dtype == np.float16:
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place, ["X"], "Out", max_relative_error=0.01)
        else:
            self.check_grad(["X"], "Out", max_relative_error=0.01)


class TestSoftmaxOp2(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


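# The following cases exercise softmax along each axis of a 4-D input.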
class TestSoftmaxOp3(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


class TestSoftmaxOp4(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


class TestSoftmaxOp5(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


class TestSoftmaxOp6(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


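# cuDNN-backed variants; skipped when Paddle is built without CUDA.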
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


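# FP16 variant: only the forward output is checked, with a looser tolerance,
# on devices that support FP16 (see the FIXME below for the gradient check).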
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    # FIXME: the gradient check fails when x_shape is [10, 10].
    def test_check_grad(self):
        pass


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


if __name__ == "__main__":
    unittest.main()