#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # Clip shiftx from below; otherwise, when a loss later computes
    # log(exp(shiftx)), exp of a very negative value underflows to 0
    # and log(0) yields -inf.
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


class TestSoftmaxOp(OpTest):
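    """Check the softmax op against the numpy stable_softmax reference.

    Subclasses override get_x_shape, get_axis and init_kernel_type to cover
    other shapes, axes and kernel types (e.g. cuDNN, float16).
    """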
    def get_x_shape(self):
        return [10, 10]

    def get_axis(self):
        return -1

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn or self.dtype == np.float16:
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place, ["X"],
                    "Out",
                    max_relative_error=0.01,
                    check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_grad(
                ["X"],
                "Out",
                max_relative_error=0.01,
                check_dygraph=(self.use_mkldnn == False))


class TestSoftmaxOpError(OpTest):
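    """Check that fluid.layers.softmax rejects invalid input types and dtypes."""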
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of softmax_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.softmax, x1)
            # The input dtype of softmax_op must be float16, float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.softmax, x2)
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
            fluid.layers.softmax(x3)


class TestSoftmaxOp2(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


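# TestSoftmaxOp3-6 exercise a 4-D input with softmax computed along each axis.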
class TestSoftmaxOp3(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


class TestSoftmaxOp4(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


class TestSoftmaxOp5(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


class TestSoftmaxOp6(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


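# The cuDNN variants rerun the checks above with use_cudnn=True; they are
# skipped when Paddle is not compiled with CUDA.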
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


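# The FP16 variants use a float16 input and a relaxed output tolerance
# (atol=1e-3); the gradient check is skipped (see the FIXME below).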
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    # FIXME: the gradient check fails when x_shape is [10, 10].
    def test_check_grad(self):
        pass


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)

    def get_x_shape(self):
        return [2, 3, 4, 5]


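# FP16 + cuDNN: a float16 input run through the cuDNN kernel.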
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


if __name__ == "__main__":
    unittest.main()