#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # Shift by the max for numerical stability, then clip so exp(shiftx)
    # never underflows to exactly 0 (downstream log(exp(shiftx)) would
    # otherwise yield -inf). The clip must apply to the shifted values,
    # not to np.max(x).
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)
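# A minimal illustration of why the shift above matters (not executed by the
# tests; the values are hypothetical): for x = np.array([1000., 1000.]),
# the naive np.exp(x) / np.sum(np.exp(x)) overflows to inf/inf = nan, while
# stable_softmax(x) shifts the exponents to [0., 0.] and returns [0.5, 0.5].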


class TestSoftmaxOp(OpTest):
    def get_x_shape(self):
        return [10, 10]

    def setUp(self):
        self.op_type = "softmax"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.dtype = np.float32
        self.init_kernel_type()
        self.shape = self.get_x_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, 1,
                                  x.reshape([-1, self.shape[-1]]))
        out = out.reshape(self.shape)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn
        }

    def init_kernel_type(self):
        pass

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            # float16 is too imprecise for the numeric gradient check; skip it.
            return
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ["X"], "Out", max_relative_error=0.01)
        else:
            self.check_grad(["X"], "Out", max_relative_error=0.01)


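# The subclasses below reuse the checks in TestSoftmaxOp, overriding only
# get_x_shape() (to cover a 4-D input, which setUp flattens to 2-D before
# applying the reference softmax along the last axis) and/or
# init_kernel_type() (to exercise the CUDNN, MKLDNN, and float16 kernels).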
class TestSoftmaxOp2(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                # float16 carries only ~3 decimal digits of precision,
                # so use a looser tolerance than the float32 default.
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_mkldnn = True


class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]


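# Running this file directly (e.g. `python test_softmax_op.py`) executes every
# case; the CUDA-dependent classes are skipped automatically by the
# @unittest.skipIf guards on builds compiled without GPU support.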
if __name__ == "__main__":
    unittest.main()