#  Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest


def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False):
    # NumPy reference: normalize axis to a tuple, honor reduce_all,
    # then compute log(sum(exp(x))) directly.
    if isinstance(axis, int):
        axis = (axis,)
    elif isinstance(axis, list):
        axis = tuple(axis)
    if reduce_all:
        axis = None
    out = np.log(np.exp(x).sum(axis=axis, keepdims=keepdim))
    return out


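# ref_logsumexp above computes log(sum(exp(x))) directly, which can overflow
# for large inputs. A numerically stable variant (a hypothetical sketch, not
# used by the tests; the inputs below are drawn from [-1, 1], so the direct
# form suffices) applies the max-shift trick:
# logsumexp(x) = max(x) + log(sum(exp(x - max(x)))).
def ref_logsumexp_stable(x, axis=None, keepdim=False):
    # axis is assumed to be None, an int, or a tuple of ints here.
    x_max = np.max(x, axis=axis, keepdims=True)
    out = x_max + np.log(np.exp(x - x_max).sum(axis=axis, keepdims=True))
    return out if keepdim else np.squeeze(out, axis=axis)

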
def logsumexp_wrapper(x, axis=None, keepdim=False, allreduce=False):
    # Python-API wrapper used by OpTest: a truthy allreduce (the op's
    # reduce_all attribute) means reducing over all axes, i.e. axis=None.
    if allreduce:
        return paddle.logsumexp(x, None, keepdim)
    return paddle.logsumexp(x, axis, keepdim)


def logsumexp_op_grad(x, axis=None, keepdim=False, reduce_all=False):
    # Compute dOut/dX with autograd in dygraph mode.
    paddle.disable_static()
    tensor_x = paddle.to_tensor(x)
    tensor_x.stop_gradient = False
    out = logsumexp_wrapper(tensor_x, axis, keepdim, reduce_all)
    grad = paddle.grad(out, [tensor_x])
    x_grad = grad[0].numpy()
    paddle.enable_static()
    return x_grad


def logsumexp_ref_grad(x):
    # For the full reduction, d logsumexp(x) / dx = softmax(x).
    exp_x = np.exp(x)
    return exp_x / exp_x.sum()


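# Hypothetical sanity-check helper, not used by the tests: validates the
# analytic softmax gradient above against a central finite difference of
# the full-reduction logsumexp.
def fd_logsumexp_grad(x, eps=1e-6):
    x = np.array(x, dtype=np.float64)  # work on a float64 copy
    grad = np.empty_like(x)
    for idx in np.ndindex(x.shape):
        orig = x[idx]
        x[idx] = orig + eps
        f_pos = ref_logsumexp(x)
        x[idx] = orig - eps
        f_neg = ref_logsumexp(x)
        x[idx] = orig
        grad[idx] = (f_pos - f_neg) / (2 * eps)
    return grad

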
class TestLogsumexp(OpTest):

    def setUp(self):
        self.op_type = 'logsumexp'
        self.python_api = logsumexp_wrapper
        self.shape = [2, 3, 4, 5]
        self.dtype = 'float64'
        self.axis = [-1]
        self.keepdim = False
        self.reduce_all = False
        self.set_attrs()

        np.random.seed(10)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_logsumexp(x, self.axis, self.keepdim, self.reduce_all)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'keepdim': self.keepdim,
            'reduce_all': self.reduce_all
        }
        self.user_defined_grads = None
        self.user_defined_grad_outputs = None
        self.set_attrs_addition()

    def set_attrs(self):
        pass

    def set_attrs_addition(self):
        pass

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(
            ['X'], ['Out'],
            user_defined_grads=self.user_defined_grads,
            user_defined_grad_outputs=self.user_defined_grad_outputs,
            check_eager=True)

    def calc_grad(self):
        # Analytic gradient: with y = logsumexp(x) over all elements,
        # dOut/dX = dy * exp(x - y), i.e. dy times softmax(x).
        dy = np.ones(1, dtype=self.dtype)
        x = self.inputs['X']
        y = self.outputs['Out']
        return dy * np.exp(x - y)


class TestLogsumexp_shape(TestLogsumexp):

    def set_attrs(self):
        self.shape = [4, 5, 6]


class TestLogsumexp_axis(TestLogsumexp):

    def set_attrs(self):
        self.axis = [0, -1]


class TestLogsumexp_axis_all(TestLogsumexp):

    def set_attrs(self):
        self.axis = [0, 1, 2, 3]

    def set_attrs_addition(self):
        if core.is_compiled_with_rocm():
            self.user_defined_grads = [self.calc_grad()]
            self.user_defined_grad_outputs = [np.ones(1, dtype=self.dtype)]


class TestLogsumexp_keepdim(TestLogsumexp):

    def set_attrs(self):
        self.keepdim = True


class TestLogsumexp_reduce_all(TestLogsumexp):

    def set_attrs(self):
        self.reduce_all = True

    def set_attrs_addition(self):
        if core.is_compiled_with_rocm():
            self.user_defined_grads = [self.calc_grad()]
            self.user_defined_grad_outputs = [np.ones(1, dtype=self.dtype)]


class TestLogsumexp_FP32(TestLogsumexp):

    def set_attrs(self):
        self.dtype = 'float32'

    def test_check_grad(self):
        self.__class__.dtype = self.dtype
        x_grad = logsumexp_op_grad(self.inputs['X'])
        ref_x_grad = logsumexp_ref_grad(self.inputs['X'])
        np.testing.assert_allclose(x_grad, ref_x_grad, rtol=1e-08, atol=1e-08)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestLogsumexp_FP16(TestLogsumexp):

    def set_attrs(self):
        self.dtype = 'float16'

    def test_check_output(self):
        ref_x = self.inputs['X'].astype(np.float32)
        out_ref = ref_logsumexp(ref_x)
        paddle.disable_static()
        x = self.inputs['X'].astype(np.float16)
        tensor_x = paddle.to_tensor(x)
        out_pad = logsumexp_wrapper(tensor_x)
        paddle.enable_static()
        np.testing.assert_allclose(out_pad.numpy(),
                                   out_ref,
                                   rtol=1e-03,
                                   atol=1e-08)

    def test_check_grad(self):
        self.__class__.dtype = self.dtype
        ref_x = self.inputs['X'].astype(np.float32)
        ref_x_grad = logsumexp_ref_grad(ref_x)
        x = self.inputs['X'].astype(np.float16)
        x_grad = logsumexp_op_grad(x)
        np.testing.assert_allclose(x_grad, ref_x_grad, rtol=1e-03, atol=1e-05)


class TestLogsumexpError(unittest.TestCase):

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            self.assertRaises(TypeError, paddle.logsumexp, 1)
            x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
            self.assertRaises(TypeError, paddle.logsumexp, x1)


class TestLogsumexpAPI(unittest.TestCase):

    def setUp(self):
        self.shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        self.place = paddle.CUDAPlace(0) \
            if core.is_compiled_with_cuda() else paddle.CPUPlace()

    def api_case(self, axis=None, keepdim=False):
        out_ref = ref_logsumexp(self.x, axis, keepdim)
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.shape)
            out = paddle.logsumexp(x, axis, keepdim)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x}, fetch_list=[out])
        np.testing.assert_allclose(res[0], out_ref, rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x)
        out = paddle.logsumexp(x, axis, keepdim)
        np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05)
        paddle.enable_static()

    def test_api(self):
        self.api_case()
        self.api_case(2)
        self.api_case([-1])
        self.api_case([2, -3])
        self.api_case((0, 1, -1))
        self.api_case(keepdim=True)

    def test_alias(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x)
        out1 = paddle.logsumexp(x)
        out2 = paddle.tensor.logsumexp(x)
        out3 = paddle.tensor.math.logsumexp(x)
        out_ref = ref_logsumexp(self.x)
        for out in [out1, out2, out3]:
            np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05)
        paddle.enable_static()


if __name__ == '__main__':
    unittest.main()