#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import check_out_dtype

import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F


def adaptive_start_index(index, input_size, output_size):
    """Return the inclusive left edge of the ``index``-th adaptive window."""
    scaled = index * input_size / output_size
    return int(np.floor(scaled))


def adaptive_end_index(index, input_size, output_size):
    """Return the exclusive right edge of the ``index``-th adaptive window."""
    scaled = (index + 1) * input_size / output_size
    return int(np.ceil(scaled))


def adaptive_pool3d_forward(
    x, output_size, adaptive=True, data_format='NCDHW', pool_type='max'
):
    """NumPy reference implementation of adaptive 3D pooling.

    Args:
        x (np.ndarray): 5-D input laid out per ``data_format``.
        output_size (int|list|None): target (D, H, W). A single int (or
            ``None``) is broadcast to all three dims; a ``None`` entry keeps
            that input dimension unchanged.
        adaptive (bool): unused; kept for signature compatibility with the
            shared pooling helpers in sibling tests.
        data_format (str): 'NCDHW' or 'NDHWC'.
        pool_type (str): 'max' or 'avg'. Any other value leaves the output
            as zeros (matching the historical behavior of this helper).

    Returns:
        np.ndarray: pooled output with the same layout as the input.
    """

    def _start(idx, in_size, out_size):
        # Inclusive left edge of the idx-th adaptive window.
        return int(np.floor(idx * in_size / out_size))

    def _end(idx, in_size, out_size):
        # Exclusive right edge of the idx-th adaptive window.
        return int(np.ceil((idx + 1) * in_size / out_size))

    N = x.shape[0]
    if data_format == 'NCDHW':
        C, D, H, W = x.shape[1], x.shape[2], x.shape[3], x.shape[4]
    else:
        C, D, H, W = x.shape[4], x.shape[1], x.shape[2], x.shape[3]

    # Normalize output_size to a concrete [D_out, H_out, W_out]. Work on a
    # local copy: the previous implementation wrote the resolved sizes back
    # into the caller's list, mutating the test fixture's argument.
    if isinstance(output_size, int) or output_size is None:
        output_size = [output_size] * 3
    else:
        output_size = list(output_size)
    if output_size[0] is None:
        output_size[0] = D
    if output_size[1] is None:
        output_size[1] = H
    if output_size[2] is None:
        output_size[2] = W
    D_out, H_out, W_out = output_size

    out_shape = (
        (N, C, D_out, H_out, W_out)
        if data_format == 'NCDHW'
        else (N, D_out, H_out, W_out, C)
    )
    out = np.zeros(out_shape)

    for k in range(D_out):
        d_start = _start(k, D, D_out)
        d_end = _end(k, D, D_out)

        for i in range(H_out):
            h_start = _start(i, H, H_out)
            h_end = _end(i, H, H_out)

            for j in range(W_out):
                w_start = _start(j, W, W_out)
                w_end = _end(j, W, W_out)

                # Number of elements in the current window (used by 'avg').
                field_size = (
                    (d_end - d_start) * (h_end - h_start) * (w_end - w_start)
                )

                if data_format == 'NCDHW':
                    window = x[
                        :, :, d_start:d_end, h_start:h_end, w_start:w_end
                    ]
                    if pool_type == 'avg':
                        out[:, :, k, i, j] = (
                            np.sum(window, axis=(2, 3, 4)) / field_size
                        )
                    elif pool_type == 'max':
                        out[:, :, k, i, j] = np.max(window, axis=(2, 3, 4))

                elif data_format == 'NDHWC':
                    window = x[
                        :, d_start:d_end, h_start:h_end, w_start:w_end, :
                    ]
                    if pool_type == 'avg':
                        out[:, k, i, j, :] = (
                            np.sum(window, axis=(1, 2, 3)) / field_size
                        )
                    elif pool_type == 'max':
                        out[:, k, i, j, :] = np.max(window, axis=(1, 2, 3))
    return out


class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
    """Checks ``paddle.nn.functional.adaptive_max_pool3d`` against the
    NumPy reference ``adaptive_pool3d_forward``, in both static-graph and
    dynamic-graph modes, on CPU and (when available) CUDA."""

    def setUp(self):
        # Random NCDHW input: N=2, C=3, D=5, H=7, W=7.
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
        # Expected results for the output sizes exercised below.
        self.res_1_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[3, 3, 3], pool_type="max"
        )

        # Scalar output size broadcast to all three spatial dims.
        self.res_2_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=5, pool_type="max"
        )

        self.res_3_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[2, 3, 5], pool_type="max"
        )

        # NDHWC expectation; the matching op call below is commented out,
        # so this result is currently unused.
        self.res_4_np = adaptive_pool3d_forward(
            x=self.x_np,
            output_size=[3, 3, 3],
            pool_type="max",
            data_format="NDHWC",
        )

        # None entries keep the corresponding input dimension unchanged.
        self.res_5_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[None, 3, None], pool_type="max"
        )

    def test_static_graph(self):
        # Run on CPU always, and additionally on GPU when compiled with CUDA.
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(
                name="x", shape=[2, 3, 5, 7, 7], dtype="float32"
            )

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3]
            )

            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5]
            )

            # NDHWC static-graph case is disabled; see res_4_np in setUp.
            # out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #    x=x, output_size=[3, 3, 3], data_format="NDHWC")

            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None]
            )

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5],
            )

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            # assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def func_dynamic_graph(self):
        # Shared dygraph body; driven twice by test_dynamic_graph below.
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3]
            )

            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5]
            )

            # NDHWC dygraph case is disabled; see res_4_np in setUp.
            # out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #    x=x, output_size=[3, 3, 3], data_format="NDHWC")

            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None]
            )

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            # assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)

    def test_dynamic_graph(self):
        # Exercise the dygraph path under the eager-mode guard first, then
        # again in the default (legacy) dygraph mode.
        with paddle.fluid.framework._test_eager_guard():
            self.func_dynamic_graph()
        self.func_dynamic_graph()

class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
    """Checks the layer-style API ``paddle.nn.AdaptiveMaxPool3D`` against the
    NumPy reference ``adaptive_pool3d_forward``, in both static-graph and
    dynamic-graph modes, on CPU and (when available) CUDA."""

    def setUp(self):
        # Random NCDHW input: N=2, C=3, D=5, H=7, W=7.
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[3, 3, 3], pool_type="max"
        )

        # Scalar output size broadcast to all three spatial dims.
        self.res_2_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=5, pool_type="max"
        )

        self.res_3_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[2, 3, 5], pool_type="max"
        )

        # NDHWC case is disabled along with the matching layer calls below.
        # self.res_4_np = adaptive_pool3d_forward(
        #     x=self.x_np,
        #     output_size=[3, 3, 3],
        #     pool_type="max",
        #     data_format="NDHWC")

        # None entries keep the corresponding input dimension unchanged.
        self.res_5_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[None, 3, None], pool_type="max"
        )

    def test_static_graph(self):
        # Run on CPU always, and additionally on GPU when compiled with CUDA.
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(
                name="x", shape=[2, 3, 5, 7, 7], dtype="float32"
            )

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[3, 3, 3]
            )
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[2, 3, 5]
            )
            out_3 = adaptive_max_pool(x=x)

            # NDHWC static-graph case is disabled; see setUp.
            #     adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
            #         output_size=[3, 3, 3], data_format="NDHWC")
            #     out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[None, 3, None]
            )
            out_5 = adaptive_max_pool(x=x)

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5],
            )

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            #     assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        # Run on CPU always, and additionally on GPU when compiled with CUDA.
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[3, 3, 3]
            )
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[2, 3, 5]
            )
            out_3 = adaptive_max_pool(x=x)

            # NDHWC dygraph case is disabled; see setUp.
            #     adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
            #         output_size=[3, 3, 3], data_format="NDHWC")
            #     out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[None, 3, None]
            )
            out_5 = adaptive_max_pool(x=x)

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            #     assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)


class TestOutDtype(unittest.TestCase):
    """Verify adaptive_max_pool3d accepts exactly the expected float dtypes
    and produces outputs of the matching dtype."""

    def test_max_pool(self):
        api_fn = F.adaptive_max_pool3d
        shape = [1, 3, 32, 32, 32]
        # check_out_dtype instantiates the input spec for each dtype and
        # validates the op's output dtype.
        check_out_dtype(
            api_fn,
            in_specs=[(shape,)],
            expect_dtypes=['float32', 'float64'],
            output_size=16,
        )
# Allow running this test file directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()