#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest, check_out_dtype
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle.nn.functional as F


def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))


def adaptive_pool3d_forward(x,
                            output_size,
                            adaptive=True,
                            data_format='NCDHW',
                            pool_type='max'):
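    """NumPy reference implementation of adaptive 3D pooling (max or avg).

    Along each dimension, output bin ``index`` covers the input range
    [floor(index * size / out_size), ceil((index + 1) * size / out_size)),
    as computed by the adaptive_start_index/adaptive_end_index helpers above.
    The tests below use it to check paddle.nn.functional.adaptive_max_pool3d
    and paddle.nn.AdaptiveMaxPool3D.
    """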

    N = x.shape[0]
    C, D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \
        if data_format == 'NCDHW' else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]

    if isinstance(output_size, int) or output_size is None:
        H_out = output_size
        W_out = output_size
        D_out = output_size
        output_size = [D_out, H_out, W_out]
    else:
        D_out, H_out, W_out = output_size

    if output_size[0] is None:
        output_size[0] = D
        D_out = D
    if output_size[1] is None:
        output_size[1] = H
        H_out = H
    if output_size[2] is None:
        output_size[2] = W
        W_out = W

    out = np.zeros((N, C, D_out, H_out, W_out)) if data_format == 'NCDHW' \
        else np.zeros((N, D_out, H_out, W_out, C))
    for k in range(D_out):
        d_start = adaptive_start_index(k, D, output_size[0])
        d_end = adaptive_end_index(k, D, output_size[0])

        for i in range(H_out):
            h_start = adaptive_start_index(i, H, output_size[1])
            h_end = adaptive_end_index(i, H, output_size[1])

            for j in range(W_out):
                w_start = adaptive_start_index(j, W, output_size[2])
                w_end = adaptive_end_index(j, W, output_size[2])

                if data_format == 'NCDHW':
                    x_masked = x[:, :, d_start:d_end, h_start:h_end,
                                 w_start:w_end]
                    if pool_type == 'avg':
                        field_size = (d_end - d_start) * (h_end - h_start) * (
                            w_end - w_start)
                        out[:, :, k, i,
                            j] = np.sum(x_masked, axis=(2, 3, 4)) / field_size
                    elif pool_type == 'max':
                        out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))

                elif data_format == 'NDHWC':
                    x_masked = x[:, d_start:d_end, h_start:h_end,
                                 w_start:w_end, :]
                    if pool_type == 'avg':
                        field_size = (d_end - d_start) * (h_end - h_start) * (
                            w_end - w_start)
                        out[:, k, i, j, :] = np.sum(x_masked,
                                                    axis=(1, 2, 3)) / field_size
                    elif pool_type == 'max':
                        out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3))
    return out


class TestAdaptiveMaxPool3DAPI(unittest.TestCase):

    def setUp(self):
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
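        # Expected outputs for each output_size case, computed with the NumPy
        # reference implementation above.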
        self.res_1_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[3, 3, 3],
                                                pool_type="max")

        self.res_2_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=5,
                                                pool_type="max")

        self.res_3_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[2, 3, 5],
                                                pool_type="max")

        self.res_4_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[3, 3, 3],
                                                pool_type="max",
                                                data_format="NDHWC")

        self.res_5_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[None, 3, None],
                                                pool_type="max")

    def test_static_graph(self):
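        # Build the static-graph program and run it on CPU and, when available, CUDA.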
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(name="x",
                                  shape=[2, 3, 5, 7, 7],
                                  dtype="float32")

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3])

            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5])

            #out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #    x=x, output_size=[3, 3, 3], data_format="NDHWC")

            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None])

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3,
             res_5] = exe.run(fluid.default_main_program(),
                              feed={"x": self.x_np},
                              fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            #assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def func_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3])

            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5])

            #out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #    x=x, output_size=[3, 3, 3], data_format="NDHWC")

            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None])

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            #assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)

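    # Run the dynamic-graph checks in eager mode and again in legacy dygraph mode.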
    def test_dynamic_graph(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_dynamic_graph()
        self.func_dynamic_graph()


class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):

    def setUp(self):
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[3, 3, 3],
                                                pool_type="max")

        self.res_2_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=5,
                                                pool_type="max")

        self.res_3_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[2, 3, 5],
                                                pool_type="max")

        # self.res_4_np = adaptive_pool3d_forward(
        #     x=self.x_np,
        #     output_size=[3, 3, 3],
        #     pool_type="max",
        #     data_format="NDHWC")

        self.res_5_np = adaptive_pool3d_forward(x=self.x_np,
                                                output_size=[None, 3, None],
                                                pool_type="max")

    def test_static_graph(self):
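        # Same cases as the functional-API test above, but exercised through the
        # paddle.nn.AdaptiveMaxPool3D layer in static-graph mode.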
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.fluid.data(name="x",
                                  shape=[2, 3, 5, 7, 7],
                                  dtype="float32")

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[3, 3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[2, 3, 5])
            out_3 = adaptive_max_pool(x=x)

            #     adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
            #         output_size=[3, 3, 3], data_format="NDHWC")
            #     out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[None, 3, None])
            out_5 = adaptive_max_pool(x=x)

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3,
             res_5] = exe.run(fluid.default_main_program(),
                              feed={"x": self.x_np},
                              fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)

            assert np.allclose(res_2, self.res_2_np)

            assert np.allclose(res_3, self.res_3_np)

            #     assert np.allclose(res_4, self.res_4_np)

            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
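        # Same cases through the paddle.nn.AdaptiveMaxPool3D layer in dynamic mode.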
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[3, 3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[2, 3, 5])
            out_3 = adaptive_max_pool(x=x)

            #     adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
            #         output_size=[3, 3, 3], data_format="NDHWC")
            #     out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
                output_size=[None, 3, None])
            out_5 = adaptive_max_pool(x=x)

            assert np.allclose(out_1.numpy(), self.res_1_np)

            assert np.allclose(out_2.numpy(), self.res_2_np)

            assert np.allclose(out_3.numpy(), self.res_3_np)

            #     assert np.allclose(out_4.numpy(), self.res_4_np)

            assert np.allclose(out_5.numpy(), self.res_5_np)


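# Sanity-check the output dtype of adaptive_max_pool3d for float32/float64 inputs.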
class TestOutDtype(unittest.TestCase):

    def test_max_pool(self):
        api_fn = F.adaptive_max_pool3d
        shape = [1, 3, 32, 32, 32]
        check_out_dtype(api_fn,
                        in_specs=[(shape, )],
                        expect_dtypes=['float32', 'float64'],
                        output_size=16)


if __name__ == '__main__':
    unittest.main()