#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest

paddle.enable_static()
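
# The tests below exercise both execution modes: static mode is enabled
# globally here, and test_dygraph temporarily switches back to dynamic mode.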


class TestMaxMinAmaxAminAPI(unittest.TestCase):

    def setUp(self):
        self.init_case()
        self.cal_np_out_and_gradient()
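        # Prefer the GPU when Paddle is compiled with CUDA support.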
        self.place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()

    def init_case(self):
        self.x_np = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]])
        self.shape = [2, 4]
        self.dtype = 'float64'
        self.axis = 0
        self.keepdim = False

    # If there are multiple minimum or maximum elements, max/min/amax/amin are
    # not differentiable, so their gradient check is not supported by the
    # unittest framework; we therefore compute the reference gradient with numpy.
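    # For example, with x = [0.9, 0.9]: max/min set the gradient to 1 at every
    # tied position, while amax/amin split it evenly into [0.5, 0.5].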
    def cal_np_out_and_gradient(self):

        def _cal_np_out_and_gradient(func):
            if func == 'amax':
                out = np.amax(self.x_np, axis=self.axis, keepdims=self.keepdim)
            elif func == 'amin':
                out = np.amin(self.x_np, axis=self.axis, keepdims=self.keepdim)
            elif func == 'max':
                out = np.max(self.x_np, axis=self.axis, keepdims=self.keepdim)
            elif func == 'min':
                out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim)
            else:
                print('This unittest only tests amax/amin/max/min, but got',
                      func)
            self.np_out[func] = out
            grad = np.zeros(self.shape)
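            # Broadcast the reduced result back to the input shape and mark
            # every element equal to the extremum with gradient 1.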
            out_b = np.broadcast_to(out.view(), self.shape)
            grad[self.x_np == out_b] = 1
            if func in ['amax', 'amin']:
                # amax/amin distribute the gradient evenly across all tied
                # extrema, so divide the mask by the tie count along the axis.
                grad_sum = grad.sum(self.axis).reshape(out.shape)
                grad_b = np.broadcast_to(grad_sum, self.shape)
                grad /= grad_b

            self.np_grad[func] = grad

        self.np_out = dict()
        self.np_grad = dict()
        _cal_np_out_and_gradient('amax')
        _cal_np_out_and_gradient('amin')
        _cal_np_out_and_gradient('max')
        _cal_np_out_and_gradient('min')

    def _choose_paddle_func(self, func, x):
        if func == 'amax':
            out = paddle.amax(x, self.axis, self.keepdim)
        elif func == 'amin':
            out = paddle.amin(x, self.axis, self.keepdim)
        elif func == 'max':
            out = paddle.max(x, self.axis, self.keepdim)
        elif func == 'min':
            out = paddle.min(x, self.axis, self.keepdim)
        else:
            print('This unittest only tests amax/amin/max/min, but got', func)
        return out

    # We check that the output of the paddle API matches the numpy reference
    # in static graph mode.
    def test_static_graph(self):

        def _test_static_graph(func):
            startup_program = fluid.Program()
            train_program = fluid.Program()
            # program_guard takes the main program first; the original swapped
            # the two names, which worked only because of that ordering.
            with fluid.program_guard(train_program, startup_program):
                x = fluid.data(name='input', dtype=self.dtype, shape=self.shape)
                x.stop_gradient = False
                out = self._choose_paddle_func(func, x)

                exe = fluid.Executor(self.place)
                res = exe.run(train_program,
                              feed={'input': self.x_np},
                              fetch_list=[out])
                self.assertTrue((np.array(res[0]) == self.np_out[func]).all())

        _test_static_graph('amax')
        _test_static_graph('amin')
        _test_static_graph('max')
        _test_static_graph('min')

    # Since gradients are easy to compute in dygraph mode, we check the
    # gradient of the paddle API against the numpy reference in dygraph.
    def test_dygraph(self):

        def _test_dygraph(func):
            paddle.disable_static()
            x = paddle.to_tensor(self.x_np,
                                 dtype=self.dtype,
                                 stop_gradient=False)
            out = self._choose_paddle_func(func, x)
            grad_tensor = paddle.ones_like(x)
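            # Back-propagate an all-ones upstream gradient; the third positional
            # argument of paddle.autograd.backward is retain_graph=True.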
            paddle.autograd.backward([out], [grad_tensor], True)

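            # Compare both the forward result and the gradient against the
            # numpy reference.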
            np.testing.assert_allclose(self.np_out[func],
                                       out.numpy(),
                                       rtol=1e-05)
            np.testing.assert_allclose(self.np_grad[func], x.grad, rtol=1e-05)
            paddle.enable_static()

        _test_dygraph('amax')
        _test_dygraph('amin')
        _test_dygraph('max')
        _test_dygraph('min')


# test two minimum or maximum elements
class TestMaxMinAmaxAminAPI2(TestMaxMinAmaxAminAPI):

    def init_case(self):
        self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
        self.shape = [2, 4]
        self.dtype = 'float64'
        self.axis = None
        self.keepdim = False


# test different axis
class TestMaxMinAmaxAminAPI3(TestMaxMinAmaxAminAPI):

    def init_case(self):
        self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
        self.shape = [2, 4]
        self.dtype = 'float64'
        self.axis = 0
        self.keepdim = False


# test keepdim = True
class TestMaxMinAmaxAminAPI4(TestMaxMinAmaxAminAPI):

    def init_case(self):
        self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
        self.shape = [2, 4]
        self.dtype = 'float64'
        self.axis = 1
        self.keepdim = True


# test axis is tuple
class TestMaxMinAmaxAminAPI5(TestMaxMinAmaxAminAPI):

    def init_case(self):
        self.x_np = np.array([[[1, 2], [3, 4]],
                              [[5, 6], [7, 8]]]).astype(np.int32)
        self.shape = [2, 2, 2]
        self.dtype = 'int32'
        self.axis = (0, 1)
        self.keepdim = False


# test multiple minimum or maximum elements
class TestMaxMinAmaxAminAPI6(TestMaxMinAmaxAminAPI):

    def init_case(self):
        self.x_np = np.array([[0.2, 0.9, 0.9, 0.9], [0.9, 0.9, 0.2, 0.2]])
        self.shape = [2, 4]
        self.dtype = 'float64'
        self.axis = None
        self.keepdim = False


if __name__ == '__main__':
    unittest.main()