#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from __future__ import division

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


def adaptive_start_index(index, input_size, output_size):
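    # First input index covered by output bin `index`:
    # floor(index * input_size / output_size).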
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
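    # One past the last input index covered by output bin `index`:
    # ceil((index + 1) * input_size / output_size).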
    return int(np.ceil((index + 1) * input_size / output_size))


def adaptive_pool2d_forward(x,
                            output_size,
                            data_format='NCHW',
                            pool_type="avg"):
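    # NumPy reference implementation of adaptive avg/max pooling over a 4-D
    # input in NCHW or NHWC layout; used to check the Paddle APIs below.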

    N = x.shape[0]
    C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \
        else [x.shape[3], x.shape[1], x.shape[2]]

    # Normalize output_size to a two-element [H_out, W_out] list; a None entry
    # means "keep the corresponding input dimension unchanged".
    if isinstance(output_size, int) or output_size is None:
        H_out = output_size
        W_out = output_size
        output_size = [H_out, W_out]
    else:
        H_out, W_out = output_size

    if output_size[0] is None:
        output_size[0] = H
        H_out = H
    if output_size[1] is None:
        output_size[1] = W
        W_out = W

    out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \
        else np.zeros((N, H_out, W_out, C))

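    # Pool every adaptive window: "avg" divides by the actual window area,
    # "max" takes the window maximum.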
    for i in range(H_out):
        in_h_start = adaptive_start_index(i, H, output_size[0])
        in_h_end = adaptive_end_index(i, H, output_size[0])

        for j in range(W_out):
            in_w_start = adaptive_start_index(j, W, output_size[1])
            in_w_end = adaptive_end_index(j, W, output_size[1])

            if data_format == 'NCHW':
                x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end]
                if pool_type == 'avg':
                    field_size = ((in_h_end - in_h_start) *
                                  (in_w_end - in_w_start))
                    out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
                elif pool_type == 'max':
                    out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
            elif data_format == 'NHWC':
                x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :]
                if pool_type == 'avg':
                    field_size = ((in_h_end - in_h_start) *
                                  (in_w_end - in_w_start))
                    out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size
                elif pool_type == 'max':
                    out[:, i, j, :] = np.max(x_masked, axis=(1, 2))
    return out


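# Tests for the functional API: paddle.nn.functional.adaptive_avg_pool2d.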
class TestAdaptiveAvgPool2DAPI(unittest.TestCase):

    def setUp(self):
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[3, 3],
                                                pool_type="avg")

        self.res_2_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=5,
                                                pool_type="avg")

        self.res_3_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[2, 5],
                                                pool_type="avg")

        self.res_4_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[3, 3],
                                                pool_type="avg",
                                                data_format="NHWC")

        self.res_5_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[None, 3],
                                                pool_type="avg")

    def test_static_graph(self):
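        # Build the static graph on CPU (and GPU when available) and compare
        # every output against the precomputed NumPy references.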
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")

            out_1 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[3, 3])

            out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[2, 5])

            out_4 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[3, 3],
                                                             data_format="NHWC")

            out_5 = paddle.nn.functional.adaptive_avg_pool2d(
                x=x, output_size=[None, 3])

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_4,
             res_5] = exe.run(fluid.default_main_program(),
                              feed={"x": self.x_np},
                              fetch_list=[out_1, out_2, out_3, out_4, out_5])

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
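        # Repeat the same checks in dynamic (imperative) mode.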
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            out_1 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[3, 3])

            out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[2, 5])

            out_4 = paddle.nn.functional.adaptive_avg_pool2d(x=x,
                                                             output_size=[3, 3],
                                                             data_format="NHWC")

            out_5 = paddle.nn.functional.adaptive_avg_pool2d(
                x=x, output_size=[None, 3])

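            # interpolate with mode="area" is expected to match adaptive average
            # pooling of the same output size (checked against res_3_np below).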
            out_6 = paddle.nn.functional.interpolate(x=x,
                                                     mode="area",
                                                     size=[2, 5])

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)

            assert np.allclose(out_6.numpy(), self.res_3_np)


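# Tests for the layer class: paddle.nn.AdaptiveAvgPool2D.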
class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):

    def setUp(self):
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[3, 3],
                                                pool_type="avg")

        self.res_2_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=5,
                                                pool_type="avg")

        self.res_3_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[2, 5],
                                                pool_type="avg")

        self.res_4_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[3, 3],
                                                pool_type="avg",
                                                data_format="NHWC")

        self.res_5_np = adaptive_pool2d_forward(x=self.x_np,
                                                output_size=[None, 3],
                                                pool_type="avg")

    def test_static_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
            out_1 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
            out_2 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
            out_3 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3],
                                                            data_format="NHWC")
            out_4 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
                output_size=[None, 3])
            out_5 = adaptive_avg_pool(x=x)

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_4,
             res_5] = exe.run(fluid.default_main_program(),
                              feed={"x": self.x_np},
                              fetch_list=[out_1, out_2, out_3, out_4, out_5])

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
            out_1 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
            out_2 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
            out_3 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3],
                                                            data_format="NHWC")
            out_4 = adaptive_avg_pool(x=x)

            adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
                output_size=[None, 3])
            out_5 = adaptive_avg_pool(x=x)

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)


if __name__ == '__main__':
    unittest.main()