#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest

import paddle
import paddle.fluid.core as core


def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
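# A worked note on the adaptive helpers above (illustrative only): for
# input_size=5 and output_size=3 the three windows are [0, 2), [1, 4) and
# [3, 5), overlapping slices whose union covers the whole axis.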


def pool3D_forward_naive(
    x,
    ksize,
    strides,
    paddings,
    global_pool=0,
    ceil_mode=False,
    exclusive=True,
    adaptive=False,
    data_format='NCDHW',
    pool_type='max',
    padding_algorithm="EXPLICIT",
):
    # update paddings
    def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
        padding = []
        for input_size, filter_size, stride_size in zip(
            input_shape, pool_size, pool_stride
        ):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0)
            )
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding
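    # SAME padding, worked through (illustrative): input_size=5, filter=3,
    # stride=2 gives out_size = 3 and pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2,
    # split as pad_0 = 1 and pad_1 = 1.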

    if isinstance(padding_algorithm, str):
        padding_algorithm = padding_algorithm.upper()
        if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
            raise ValueError(
                "Unknown Attr(padding_algorithm): '%s'. "
                "It can only be 'SAME', 'VALID' or 'EXPLICIT'."
                % str(padding_algorithm)
            )

        if padding_algorithm == "VALID":
            paddings = [0, 0, 0, 0, 0, 0]
            if ceil_mode is not False:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
                    " must be False. "
                    "Received ceil_mode: True."
                )
        elif padding_algorithm == "SAME":
            input_data_shape = []
            if data_format == "NCDHW":
                input_data_shape = x.shape[2:5]
            elif data_format == "NDHWC":
                input_data_shape = x.shape[1:4]
            paddings = _get_padding_with_SAME(input_data_shape, ksize, strides)

    assert len(paddings) == 3 or len(paddings) == 6
    is_sys = len(paddings) == 3

    N = x.shape[0]
    C, D, H, W = (
        [x.shape[1], x.shape[2], x.shape[3], x.shape[4]]
        if data_format == 'NCDHW'
        else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]
    )

    if global_pool == 1:
        ksize = [D, H, W]
        paddings = [0 for _ in range(len(paddings))]

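    # A 3-element padding list is symmetric (the same pad on both sides of
    # each spatial dim); a 6-element list gives each side its own value.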
    pad_d_forth = paddings[0]
    pad_d_back = paddings[0] if is_sys else paddings[1]
    pad_h_up = paddings[1] if is_sys else paddings[2]
    pad_h_down = paddings[1] if is_sys else paddings[3]
    pad_w_left = paddings[2] if is_sys else paddings[4]
    pad_w_right = paddings[2] if is_sys else paddings[5]

    if adaptive:
        D_out, H_out, W_out = ksize
    else:
        D_out = (
            (D - ksize[0] + pad_d_forth + pad_d_back + strides[0] - 1)
            // strides[0]
            + 1
            if ceil_mode
            else (D - ksize[0] + pad_d_forth + pad_d_back) // strides[0] + 1
        )

        H_out = (
            (H - ksize[1] + pad_h_up + pad_h_down + strides[1] - 1)
            // strides[1]
            + 1
            if ceil_mode
            else (H - ksize[1] + pad_h_up + pad_h_down) // strides[1] + 1
        )

        W_out = (
            (W - ksize[2] + pad_w_left + pad_w_right + strides[2] - 1)
            // strides[2]
            + 1
            if ceil_mode
            else (W - ksize[2] + pad_w_left + pad_w_right) // strides[2] + 1
        )
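        # ceil_mode adds (stride - 1) to the numerator so the division rounds
        # up, e.g. with D=5, ksize=2, stride=2 and no padding, floor mode
        # gives (5 - 2) // 2 + 1 = 2 while ceil gives (5 - 2 + 1) // 2 + 1 = 3.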

    out = (
        np.zeros((N, C, D_out, H_out, W_out))
        if data_format == 'NCDHW'
        else np.zeros((N, D_out, H_out, W_out, C))
    )
    for k in range(D_out):
        if adaptive:
            d_start = adaptive_start_index(k, D, ksize[0])
            d_end = adaptive_end_index(k, D, ksize[0])

        for i in range(H_out):
            if adaptive:
                h_start = adaptive_start_index(i, H, ksize[1])
                h_end = adaptive_end_index(i, H, ksize[1])

            for j in range(W_out):
                if adaptive:
                    w_start = adaptive_start_index(j, W, ksize[2])
                    w_end = adaptive_end_index(j, W, ksize[2])
                else:
                    d_start = k * strides[0] - pad_d_forth
                    d_end = np.min(
                        (
                            k * strides[0] + ksize[0] - pad_d_forth,
                            D + pad_d_back,
                        )
                    )
                    h_start = i * strides[1] - pad_h_up
                    h_end = np.min(
                        (i * strides[1] + ksize[1] - pad_h_up, H + pad_h_down)
                    )
                    w_start = j * strides[2] - pad_w_left
                    w_end = np.min(
                        (
                            j * strides[2] + ksize[2] - pad_w_left,
                            W + pad_w_right,
                        )
                    )

                    field_size = (
                        (d_end - d_start)
                        * (h_end - h_start)
                        * (w_end - w_start)
                    )
                    w_start = np.max((w_start, 0))
                    d_start = np.max((d_start, 0))
                    h_start = np.max((h_start, 0))
                    w_end = np.min((w_end, W))
                    d_end = np.min((d_end, D))
                    h_end = np.min((h_end, H))
                if data_format == 'NCDHW':
                    x_masked = x[
                        :, :, d_start:d_end, h_start:h_end, w_start:w_end
                    ]
                    if pool_type == 'avg':
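                        # Exclusive (and adaptive) average pooling divides by
                        # the clipped window size; inclusive mode keeps the
                        # padded field_size computed before clipping.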
                        if exclusive or adaptive:
                            field_size = (
                                (d_end - d_start)
                                * (h_end - h_start)
                                * (w_end - w_start)
                            )

                        out[:, :, k, i, j] = (
                            np.sum(x_masked, axis=(2, 3, 4)) / field_size
                        )
                    elif pool_type == 'max':
                        out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))

                elif data_format == 'NDHWC':
                    x_masked = x[
                        :, d_start:d_end, h_start:h_end, w_start:w_end, :
                    ]
                    if pool_type == 'avg':
                        if exclusive or adaptive:
                            field_size = (
                                (d_end - d_start)
                                * (h_end - h_start)
                                * (w_end - w_start)
                            )

                        out[:, k, i, j, :] = (
                            np.sum(x_masked, axis=(1, 2, 3)) / field_size
                        )
                    elif pool_type == 'max':
                        out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3))

    return out


def max_pool3D_forward_naive(
    x,
    ksize,
    strides,
    paddings,
    global_pool=0,
    ceil_mode=False,
    exclusive=True,
    adaptive=False,
):
    out = pool3D_forward_naive(
        x=x,
        ksize=ksize,
        strides=strides,
        paddings=paddings,
        global_pool=global_pool,
        ceil_mode=ceil_mode,
        exclusive=exclusive,
        adaptive=adaptive,
        data_format='NCDHW',
        pool_type="max",
    )
    return out


def avg_pool3D_forward_naive(
    x,
    ksize,
    strides,
    paddings,
    global_pool=0,
    ceil_mode=False,
    exclusive=True,
    adaptive=False,
):
    out = pool3D_forward_naive(
        x=x,
        ksize=ksize,
        strides=strides,
        paddings=paddings,
        global_pool=global_pool,
        ceil_mode=ceil_mode,
        exclusive=exclusive,
        adaptive=adaptive,
        data_format='NCDHW',
        pool_type="avg",
    )
    return out
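# A minimal usage sketch of the naive references above (illustrative only,
# not exercised by the tests): a 2x2x2 max pool with stride 2 halves each
# spatial dimension of an NCDHW tensor.
#
#     x = np.random.rand(2, 3, 4, 4, 4)
#     y = max_pool3D_forward_naive(
#         x, ksize=[2, 2, 2], strides=[2, 2, 2], paddings=[0, 0, 0]
#     )
#     assert y.shape == (2, 3, 2, 2, 2)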


class TestPool3D_Op(OpTest):
    def setUp(self):
        self.op_type = "pool3d"
        self.init_kernel_type()
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.init_test_case()
        self.padding_algorithm = "EXPLICIT"
        self.init_paddings()
        self.init_global_pool()
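        # init_kernel_type() runs again below on purpose: subclasses that
        # pick their dtype there (the fp16 cases) would otherwise be
        # clobbered by the float32/float64 default assigned above.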
        self.init_kernel_type()
        self.init_pool_type()
        self.init_ceil_mode()
        self.init_exclusive()
        self.init_adaptive()
        self.init_data_format()
        self.init_shape()
        paddle.enable_static()

        input = np.random.random(self.shape).astype(self.dtype)
        output = pool3D_forward_naive(
            input,
            self.ksize,
            self.strides,
            self.paddings,
            self.global_pool,
            self.ceil_mode,
            self.exclusive,
            self.adaptive,
            self.data_format,
            self.pool_type,
            self.padding_algorithm,
        ).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}

        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
            'use_cudnn': self.use_cudnn,
            'ceil_mode': self.ceil_mode,
            'data_format': self.data_format,
            'exclusive': self.exclusive,
            'adaptive': self.adaptive,
            "padding_algorithm": self.padding_algorithm,
        }

        self.outputs = {'Out': output}

    def has_cudnn(self):
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn() and self.pool_type != "max":
            place = core.CUDAPlace(0)
            if core.is_compiled_with_rocm():
                self.check_grad_with_place(
                    place, set(['X']), 'Out', max_relative_error=1e-2
                )
            else:
                self.check_grad_with_place(place, set(['X']), 'Out')
        elif self.pool_type != "max":
            if core.is_compiled_with_rocm():
                self.check_grad(set(['X']), 'Out', max_relative_error=1e-2)
            else:
                self.check_grad(set(['X']), 'Out')

    def init_data_format(self):
        self.data_format = "NCDHW"

    def init_shape(self):
        self.shape = [1, 3, 5, 6, 5]

    def init_test_case(self):
        self.ksize = [2, 3, 1]
        self.strides = [2, 2, 3]

    def init_paddings(self):
        self.paddings = [0, 0, 0]
        self.padding_algorithm = "EXPLICIT"

    def init_kernel_type(self):
        self.use_cudnn = False

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = True

    def init_ceil_mode(self):
        self.ceil_mode = False

    def init_exclusive(self):
        self.exclusive = True

    def init_adaptive(self):
        self.adaptive = False


class TestCase1(TestPool3D_Op):
    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [0, 0, 0]

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = False


class TestCase2(TestPool3D_Op):
    def init_shape(self):
        self.shape = [1, 3, 6, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3, 4]
        self.strides = [1, 3, 2]

    def init_paddings(self):
        self.paddings = [1, 1, 1]

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = False


class TestCase3(TestPool3D_Op):
    def init_pool_type(self):
        self.pool_type = "max"


class TestCase4(TestCase1):
    def init_pool_type(self):
        self.pool_type = "max"


class TestCase5(TestCase2):
    def init_pool_type(self):
        self.pool_type = "max"


# --------------------test pool3d cudnn--------------------
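# Each create_test_* factory below stamps out a subclass of `parent` with one
# attribute overridden and registers it in globals() under a distinct name so
# that unittest discovery picks it up.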


def create_test_cudnn_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOp")
    TestCUDNNCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNCase


create_test_cudnn_class(TestPool3D_Op)
create_test_cudnn_class(TestCase1)
create_test_cudnn_class(TestCase2)
create_test_cudnn_class(TestCase3)
create_test_cudnn_class(TestCase4)
create_test_cudnn_class(TestCase5)


def create_test_cudnn_fp16_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNFp16Case(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    if core.is_compiled_with_rocm():
                        self.check_output_with_place(place, atol=1e-2)
                    else:
                        self.check_output_with_place(place, atol=1e-3)

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op")
    TestCUDNNFp16Case.__name__ = cls_name
    globals()[cls_name] = TestCUDNNFp16Case


def create_test_fp16_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestFp16Case(parent):
        def init_kernel_type(self):
            self.use_cudnn = False
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=1e-2)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
    TestFp16Case.__name__ = cls_name
    globals()[cls_name] = TestFp16Case


create_test_cudnn_fp16_class(TestPool3D_Op)
create_test_cudnn_fp16_class(TestCase1)
create_test_cudnn_fp16_class(TestCase2)
create_test_cudnn_fp16_class(TestCase3)
create_test_cudnn_fp16_class(TestCase4)
create_test_cudnn_fp16_class(TestCase5)

create_test_fp16_class(TestPool3D_Op)
create_test_fp16_class(TestCase1)
create_test_fp16_class(TestCase2)
create_test_fp16_class(TestCase3)
create_test_fp16_class(TestCase4)
create_test_fp16_class(TestCase5)


# ---- test ceil mode ------
def create_test_cudnn_use_ceil_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestPool3DUseCeilCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOpCeilMode")
    TestPool3DUseCeilCase.__name__ = cls_name
    globals()[cls_name] = TestPool3DUseCeilCase


create_test_cudnn_use_ceil_class(TestPool3D_Op)
create_test_cudnn_use_ceil_class(TestCase1)


def create_test_use_ceil_class(parent):
    class TestPool3DUseCeilCase(parent):
        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "CeilModeCase")
    TestPool3DUseCeilCase.__name__ = cls_name
    globals()[cls_name] = TestPool3DUseCeilCase


create_test_use_ceil_class(TestCase1)
create_test_use_ceil_class(TestCase2)


class TestAvgInclude(TestCase2):
    def init_exclusive(self):
        self.exclusive = False


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNAvgInclude(TestCase2):
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive(TestCase1):
    def init_adaptive(self):
        self.adaptive = True


class TestAvgPoolAdaptiveAsyOutSize(TestCase1):
    def init_adaptive(self):
        self.adaptive = True

    def init_shape(self):
        self.shape = [1, 3, 3, 4, 4]

    def init_test_case(self):
        self.ksize = [2, 2, 3]
        self.strides = [1, 1, 1]


# -------test pool3d with asymmetric padding------
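# These cases exercise six-value padding lists, one value per side:
# [front, back, top, bottom, left, right] for the D, H and W axes.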
class TestPool3D_Op_AsyPadding(TestPool3D_Op):
    def init_test_case(self):
        self.ksize = [3, 4, 3]
        self.strides = [1, 1, 2]

    def init_paddings(self):
        self.paddings = [0, 0, 0, 2, 3, 0]

    def init_shape(self):
        self.shape = [1, 3, 5, 5, 6]


class TestCase1_AsyPadding(TestCase1):
    def init_test_case(self):
        self.ksize = [3, 3, 4]
        self.strides = [1, 1, 2]

    def init_paddings(self):
605 606 607
        self.paddings = [1, 0, 2, 1, 2, 1]

    def init_shape(self):
608
        self.shape = [1, 3, 7, 7, 6]


class TestCase2_AsyPadding(TestCase2):
    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [1, 2, 1, 1, 1, 0]

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]


class TestCase3_AsyPadding(TestCase3):
    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
629 630 631
        self.paddings = [1, 0, 0, 0, 1, 0]

    def init_shape(self):
632
        self.shape = [1, 3, 5, 5, 5]


class TestCase4_AsyPadding(TestCase4):
    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
641 642 643
        self.paddings = [1, 0, 2, 1, 2, 1]

    def init_shape(self):
644
        self.shape = [1, 3, 7, 7, 7]


class TestCase5_AsyPadding(TestCase5):
    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
653 654 655
        self.paddings = [1, 2, 1, 1, 1, 0]

    def init_shape(self):
656
        self.shape = [1, 3, 7, 7, 7]


create_test_cudnn_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_class(TestCase1_AsyPadding)
create_test_cudnn_class(TestCase2_AsyPadding)
create_test_cudnn_class(TestCase3_AsyPadding)
create_test_cudnn_class(TestCase4_AsyPadding)
create_test_cudnn_class(TestCase5_AsyPadding)

create_test_cudnn_fp16_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_fp16_class(TestCase1_AsyPadding)
create_test_cudnn_fp16_class(TestCase2_AsyPadding)
create_test_cudnn_fp16_class(TestCase3_AsyPadding)
create_test_cudnn_fp16_class(TestCase4_AsyPadding)
create_test_cudnn_fp16_class(TestCase5_AsyPadding)

create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding)

create_test_use_ceil_class(TestCase1_AsyPadding)
create_test_use_ceil_class(TestCase2_AsyPadding)


class TestAvgInclude_AsyPadding(TestCase2):
    def init_exclusive(self):
        self.exclusive = False

    def init_paddings(self):
        self.paddings = [2, 2, 1, 1, 0, 0]


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNAvgInclude_AsyPadding(TestCase2):
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False

    def init_paddings(self):
        self.paddings = [1, 0, 0, 0, 0, 0]

    def init_shape(self):
        self.shape = [1, 3, 5, 5, 5]


class TestAvgPoolAdaptive_AsyPadding(TestCase1):
    def init_adaptive(self):
        self.adaptive = True

    def init_paddings(self):
        self.paddings = [1, 0, 2, 1, 2, 1]


# ------------ test channel_last --------------
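# NDHWC ("channel_last") keeps the pooling math unchanged; only the axis
# carrying channels moves from position 1 to position 4.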
class TestPool3D_channel_last(TestPool3D_Op):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 5, 6, 3]


class TestCase1_channel_last(TestCase1):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


class TestCase2_channel_last(TestCase2):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 5, 3]


class TestCase3_channel_last(TestCase3):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 6, 5, 3]


class TestCase4_channel_last(TestCase4):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 6, 7, 3]


class TestCase5_channel_last(TestCase5):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


create_test_cudnn_class(TestPool3D_channel_last)
create_test_cudnn_class(TestCase1_channel_last)
create_test_cudnn_class(TestCase2_channel_last)
create_test_cudnn_class(TestCase3_channel_last)
create_test_cudnn_class(TestCase4_channel_last)
create_test_cudnn_class(TestCase5_channel_last)

create_test_cudnn_use_ceil_class(TestPool3D_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_channel_last)

create_test_use_ceil_class(TestCase1_channel_last)
create_test_use_ceil_class(TestCase2_channel_last)


class TestCase5_Max(TestCase2):
    def init_pool_type(self):
        self.pool_type = "max"

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn() and self.pool_type == "max":
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, set(['X']), 'Out', max_relative_error=1.00
            )
        elif self.pool_type == "max":
            self.check_grad(set(['X']), 'Out', max_relative_error=1.00)


class TestCase5_channel_last_Max(TestCase5_Max):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


create_test_cudnn_class(TestCase5_Max)
create_test_cudnn_class(TestCase5_channel_last_Max)


class TestAvgInclude_channel_last(TestCase2_channel_last):
    def init_exclusive(self):
        self.exclusive = False


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last):
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last):
    def init_adaptive(self):
        self.adaptive = True


# --- test asymmetric padding with channel_last
class TestPool3D_Op_AsyPadding_channel_last(TestPool3D_Op_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 5, 6, 3]


class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 6, 8, 3]


class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 6, 8, 7, 3]


class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 7, 5, 3]


class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 6, 7, 7, 3]


class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 8, 6, 3]


create_test_cudnn_class(TestPool3D_Op_AsyPadding_channel_last)
create_test_cudnn_class(TestCase1_AsyPadding_channel_last)
create_test_cudnn_class(TestCase2_AsyPadding_channel_last)
create_test_cudnn_class(TestCase3_AsyPadding_channel_last)
create_test_cudnn_class(TestCase4_AsyPadding_channel_last)
create_test_cudnn_class(TestCase5_AsyPadding_channel_last)

create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding_channel_last)

create_test_use_ceil_class(TestCase1_AsyPadding_channel_last)
create_test_use_ceil_class(TestCase2_AsyPadding_channel_last)


class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):
    def init_data_format(self):
        self.data_format = "NDHWC"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNAvgInclude_AsyPadding_channel_last(
    TestCUDNNAvgInclude_AsyPadding
):
    def init_data_format(self):
        self.data_format = "NDHWC"


class TestAvgPoolAdaptive_AsyPadding_channel_last(
    TestAvgPoolAdaptive_AsyPadding
):
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


# test padding = SAME VALID
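# SAME resolves paddings so that out_size = ceil(in_size / stride); VALID
# forces zero padding. The explicit paddings set below are placeholders that
# the padding_algorithm overrides.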
def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.paddings = [0, 0, 0]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase


create_test_padding_SAME_class(TestPool3D_Op)
create_test_padding_SAME_class(TestCase1)
create_test_padding_SAME_class(TestCase2)
create_test_padding_SAME_class(TestCase3)
create_test_padding_SAME_class(TestCase4)
create_test_padding_SAME_class(TestCase5)

create_test_padding_SAME_class(TestPool3D_channel_last)
create_test_padding_SAME_class(TestCase1_channel_last)
create_test_padding_SAME_class(TestCase2_channel_last)
create_test_padding_SAME_class(TestCase3_channel_last)
create_test_padding_SAME_class(TestCase4_channel_last)
create_test_padding_SAME_class(TestCase5_channel_last)


def create_test_cudnn_padding_SAME_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNPaddingSAMECase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
    TestCUDNNPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingSAMECase


create_test_cudnn_padding_SAME_class(TestPool3D_Op)
create_test_cudnn_padding_SAME_class(TestCase1)
create_test_cudnn_padding_SAME_class(TestCase2)
create_test_cudnn_padding_SAME_class(TestCase3)
create_test_cudnn_padding_SAME_class(TestCase4)
create_test_cudnn_padding_SAME_class(TestCase5)

create_test_cudnn_padding_SAME_class(TestPool3D_channel_last)
create_test_cudnn_padding_SAME_class(TestCase1_channel_last)
create_test_cudnn_padding_SAME_class(TestCase2_channel_last)
create_test_cudnn_padding_SAME_class(TestCase3_channel_last)
create_test_cudnn_padding_SAME_class(TestCase4_channel_last)
create_test_cudnn_padding_SAME_class(TestCase5_channel_last)


def create_test_padding_VALID_class(parent):
    class TestPaddingVALIDCase(parent):
        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
    TestPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestPaddingVALIDCase


create_test_padding_VALID_class(TestPool3D_Op)
create_test_padding_VALID_class(TestCase1)
create_test_padding_VALID_class(TestCase2)
create_test_padding_VALID_class(TestCase3)
create_test_padding_VALID_class(TestCase4)
create_test_padding_VALID_class(TestCase5)

create_test_padding_VALID_class(TestPool3D_channel_last)
create_test_padding_VALID_class(TestCase1_channel_last)
create_test_padding_VALID_class(TestCase2_channel_last)
create_test_padding_VALID_class(TestCase3_channel_last)
create_test_padding_VALID_class(TestCase4_channel_last)
create_test_padding_VALID_class(TestCase5_channel_last)


def create_test_cudnn_padding_VALID_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNPaddingVALIDCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
    TestCUDNNPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingVALIDCase


create_test_cudnn_padding_VALID_class(TestPool3D_Op)
create_test_cudnn_padding_VALID_class(TestCase1)
create_test_cudnn_padding_VALID_class(TestCase2)
create_test_cudnn_padding_VALID_class(TestCase3)
create_test_cudnn_padding_VALID_class(TestCase4)
create_test_cudnn_padding_VALID_class(TestCase5)

create_test_cudnn_padding_VALID_class(TestPool3D_channel_last)
create_test_cudnn_padding_VALID_class(TestCase1_channel_last)
create_test_cudnn_padding_VALID_class(TestCase2_channel_last)
create_test_cudnn_padding_VALID_class(TestCase3_channel_last)
create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
create_test_cudnn_padding_VALID_class(TestCase5_channel_last)


if __name__ == '__main__':
    unittest.main()