#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid


def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
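

# Illustrative example (assumed sizes): with input_size=5 and output_size=3,
# the adaptive windows are [0, 2), [1, 4) and [3, 5) --
# adaptive_start_index(1, 5, 3) is 1 and adaptive_end_index(1, 5, 3) is 4,
# so adjacent adaptive windows may overlap.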


def pool3D_forward_naive(x,
                         ksize,
                         strides,
                         paddings,
                         global_pool=0,
                         ceil_mode=False,
                         exclusive=True,
                         adaptive=False,
                         data_format='NCDHW',
                         pool_type='max',
                         padding_algorithm="EXPLICIT"):
    # update paddings
    def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
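        # Worked example (assumed numbers, for illustration): input_size=5,
        # filter_size=3, stride=2 -> out_size = ceil(5/2) = 3,
        # pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2 -> pad_0 = 1, pad_1 = 1.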
        padding = []
        for input_size, filter_size, stride_size in zip(input_shape, pool_size,
                                                        pool_stride):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0))
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    if isinstance(padding_algorithm, str):
        padding_algorithm = padding_algorithm.upper()
        if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
            raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                             "It can only be 'SAME', 'VALID' or 'EXPLICIT'." %
                             str(padding_algorithm))

        if padding_algorithm == "VALID":
            paddings = [0, 0, 0, 0, 0, 0]
            if ceil_mode:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
                    " must be False. "
                    "Received ceil_mode: True.")
        elif padding_algorithm == "SAME":
            input_data_shape = []
            if data_format == "NCDHW":
                input_data_shape = x.shape[2:5]
            elif data_format == "NDHWC":
                input_data_shape = x.shape[1:4]
            paddings = _get_padding_with_SAME(input_data_shape, ksize, strides)

    assert len(paddings) == 3 or len(paddings) == 6
    is_sys = len(paddings) == 3

    N = x.shape[0]
    C, D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \
        if data_format == 'NCDHW' else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]

    if global_pool == 1:
        ksize = [D, H, W]
        paddings = [0 for _ in range(len(paddings))]

    pad_d_forth = paddings[0]
    pad_d_back = paddings[0] if is_sys else paddings[1]
    pad_h_up = paddings[1] if is_sys else paddings[2]
    pad_h_down = paddings[1] if is_sys else paddings[3]
    pad_w_left = paddings[2] if is_sys else paddings[4]
    pad_w_right = paddings[2] if is_sys else paddings[5]
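
    # Padding layout, as unpacked above (illustrative): a 3-element list is
    # symmetric per axis ([d, h, w], same pad on both sides); a 6-element
    # list is [d_forth, d_back, h_up, h_down, w_left, w_right], e.g.
    # paddings=[1, 0, 2, 1, 2, 1] pads depth by (1, 0), height by (2, 1)
    # and width by (2, 1).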

    if adaptive:
        D_out, H_out, W_out = ksize
    else:
        D_out = (D - ksize[0] + pad_d_forth + pad_d_back + strides[0] - 1) // strides[0] + 1 \
            if ceil_mode else (D - ksize[0] + pad_d_forth + pad_d_back) // strides[0] + 1

        H_out = (H - ksize[1] + pad_h_up + pad_h_down + strides[1] - 1) // strides[1] + 1 \
            if ceil_mode else (H - ksize[1] + pad_h_up + pad_h_down) // strides[1] + 1

        W_out = (W - ksize[2] + pad_w_left + pad_w_right + strides[2] - 1) // strides[2] + 1 \
            if ceil_mode else (W - ksize[2] + pad_w_left + pad_w_right) // strides[2] + 1
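
        # Worked example (assumed numbers): D=5, ksize[0]=2, zero padding,
        # stride 2 -> D_out = (5 - 2) // 2 + 1 = 2; with ceil_mode=True,
        # D_out = (5 - 2 + 2 - 1) // 2 + 1 = 3, keeping the last partial
        # window.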


    out = np.zeros((N, C, D_out, H_out, W_out)) if data_format == 'NCDHW' \
        else np.zeros((N, D_out, H_out, W_out, C))
    for k in range(D_out):
        if adaptive:
            d_start = adaptive_start_index(k, D, ksize[0])
            d_end = adaptive_end_index(k, D, ksize[0])

        for i in range(H_out):
            if adaptive:
                h_start = adaptive_start_index(i, H, ksize[1])
                h_end = adaptive_end_index(i, H, ksize[1])

            for j in range(W_out):
                if adaptive:
                    w_start = adaptive_start_index(j, W, ksize[2])
                    w_end = adaptive_end_index(j, W, ksize[2])
                else:
                    d_start = k * strides[0] - pad_d_forth
                    d_end = np.min((k * strides[0] + ksize[0] - pad_d_forth,
                                    D + pad_d_back))
                    h_start = i * strides[1] - pad_h_up
                    h_end = np.min(
                        (i * strides[1] + ksize[1] - pad_h_up, H + pad_h_down))
                    w_start = j * strides[2] - pad_w_left
                    w_end = np.min((j * strides[2] + ksize[2] - pad_w_left,
                                    W + pad_w_right))

                    field_size = (d_end - d_start) * (h_end - h_start) * (
                        w_end - w_start)
                    w_start = np.max((w_start, 0))
                    d_start = np.max((d_start, 0))
                    h_start = np.max((h_start, 0))
                    w_end = np.min((w_end, W))
                    d_end = np.min((d_end, D))
                    h_end = np.min((h_end, H))

                if data_format == 'NCDHW':
                    x_masked = x[:, :, d_start:d_end, h_start:h_end,
                                 w_start:w_end]
                    if pool_type == 'avg':
                        if exclusive or adaptive:
                            field_size = (d_end - d_start) * (
                                h_end - h_start) * (w_end - w_start)

                        out[:, :, k, i,
                            j] = np.sum(x_masked, axis=(2, 3, 4)) / field_size
                    elif pool_type == 'max':
                        out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))

                elif data_format == 'NDHWC':
                    x_masked = x[:, d_start:d_end, h_start:h_end,
                                 w_start:w_end, :]
                    if pool_type == 'avg':
                        if exclusive or adaptive:
                            field_size = (d_end - d_start) * (
                                h_end - h_start) * (w_end - w_start)

                        out[:, k, i, j, :] = np.sum(x_masked,
                                                    axis=(1, 2, 3)) / field_size
                    elif pool_type == 'max':
                        out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3))

    return out


def max_pool3D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=True,
                             adaptive=False):
    out = pool3D_forward_naive(x=x,
                               ksize=ksize,
                               strides=strides,
                               paddings=paddings,
                               global_pool=global_pool,
                               ceil_mode=ceil_mode,
                               exclusive=exclusive,
                               adaptive=adaptive,
                               data_format='NCDHW',
                               pool_type="max")
    return out


def avg_pool3D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=True,
                             adaptive=False):
    out = pool3D_forward_naive(x=x,
                               ksize=ksize,
                               strides=strides,
                               paddings=paddings,
                               global_pool=global_pool,
                               ceil_mode=ceil_mode,
                               exclusive=exclusive,
                               adaptive=adaptive,
                               data_format='NCDHW',
                               pool_type="avg")
    return out
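

# A minimal sanity-check sketch for the naive references above (assumed
# shapes, illustrative only, not executed by this test suite):
#
#   x = np.random.random((1, 3, 4, 4, 4))
#   y = max_pool3D_forward_naive(x, ksize=[2, 2, 2], strides=[2, 2, 2],
#                                paddings=[0, 0, 0])
#   assert y.shape == (1, 3, 2, 2, 2)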


class TestPool3D_Op(OpTest):

    def setUp(self):
        self.op_type = "pool3d"
        self.init_kernel_type()
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.init_test_case()
        self.padding_algorithm = "EXPLICIT"
        self.init_paddings()
        self.init_global_pool()
        self.init_kernel_type()
        self.init_pool_type()
        self.init_ceil_mode()
        self.init_exclusive()
        self.init_adaptive()
        self.init_data_format()
        self.init_shape()
        paddle.enable_static()

        input = np.random.random(self.shape).astype(self.dtype)
        output = pool3D_forward_naive(input, self.ksize, self.strides,
                                      self.paddings, self.global_pool,
                                      self.ceil_mode, self.exclusive,
                                      self.adaptive, self.data_format,
                                      self.pool_type,
                                      self.padding_algorithm).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}

        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
            'use_cudnn': self.use_cudnn,
            'ceil_mode': self.ceil_mode,
            'data_format': self.data_format,
            'exclusive': self.exclusive,
            'adaptive': self.adaptive,
            "padding_algorithm": self.padding_algorithm,
        }

        self.outputs = {'Out': output}

    def has_cudnn(self):
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn() and self.pool_type != "max":
            place = core.CUDAPlace(0)
            if core.is_compiled_with_rocm():
                self.check_grad_with_place(place,
                                           set(['X']),
                                           'Out',
                                           max_relative_error=1e-2)
            else:
                self.check_grad_with_place(place, set(['X']), 'Out')
        elif self.pool_type != "max":
            if core.is_compiled_with_rocm():
                self.check_grad(set(['X']), 'Out', max_relative_error=1e-2)
            else:
                self.check_grad(set(['X']), 'Out')

    def init_data_format(self):
        self.data_format = "NCDHW"

    def init_shape(self):
        self.shape = [1, 3, 5, 6, 5]

    def init_test_case(self):
        self.ksize = [2, 3, 1]
        self.strides = [2, 2, 3]

    def init_paddings(self):
        self.paddings = [0, 0, 0]
        self.padding_algorithm = "EXPLICIT"

    def init_kernel_type(self):
        self.use_cudnn = False

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = True

    def init_ceil_mode(self):
        self.ceil_mode = False

    def init_exclusive(self):
        self.exclusive = True

    def init_adaptive(self):
        self.adaptive = False


class TestCase1(TestPool3D_Op):

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [0, 0, 0]

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = False


class TestCase2(TestPool3D_Op):

    def init_shape(self):
        self.shape = [1, 3, 6, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3, 4]
        self.strides = [1, 3, 2]

    def init_paddings(self):
        self.paddings = [1, 1, 1]

    def init_pool_type(self):
        self.pool_type = "avg"

    def init_global_pool(self):
        self.global_pool = False


class TestCase3(TestPool3D_Op):

    def init_pool_type(self):
        self.pool_type = "max"


class TestCase4(TestCase1):

    def init_pool_type(self):
        self.pool_type = "max"


class TestCase5(TestCase2):

    def init_pool_type(self):
        self.pool_type = "max"


# -------------------- test pool3d cudnn --------------------


def create_test_cudnn_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNCase(parent):

        def init_kernel_type(self):
            self.use_cudnn = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOp")
    TestCUDNNCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNCase


create_test_cudnn_class(TestPool3D_Op)
create_test_cudnn_class(TestCase1)
create_test_cudnn_class(TestCase2)
create_test_cudnn_class(TestCase3)
create_test_cudnn_class(TestCase4)
create_test_cudnn_class(TestCase5)


def create_test_cudnn_fp16_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNFp16Case(parent):

        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    if core.is_compiled_with_rocm():
                        self.check_output_with_place(place, atol=1e-2)
                    else:
                        self.check_output_with_place(place, atol=1e-3)

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op")
    TestCUDNNFp16Case.__name__ = cls_name
    globals()[cls_name] = TestCUDNNFp16Case


def create_test_fp16_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestFp16Case(parent):

        def init_kernel_type(self):
            self.use_cudnn = False
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=1e-2)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
    TestFp16Case.__name__ = cls_name
    globals()[cls_name] = TestFp16Case


create_test_cudnn_fp16_class(TestPool3D_Op)
create_test_cudnn_fp16_class(TestCase1)
create_test_cudnn_fp16_class(TestCase2)
create_test_cudnn_fp16_class(TestCase3)
create_test_cudnn_fp16_class(TestCase4)
create_test_cudnn_fp16_class(TestCase5)

create_test_fp16_class(TestPool3D_Op)
create_test_fp16_class(TestCase1)
create_test_fp16_class(TestCase2)
create_test_fp16_class(TestCase3)
create_test_fp16_class(TestCase4)
create_test_fp16_class(TestCase5)


# ---- test ceil mode ------
def create_test_cudnn_use_ceil_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestPool3DUseCeilCase(parent):

        def init_kernel_type(self):
            self.use_cudnn = True

        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOpCeilMode")
    TestPool3DUseCeilCase.__name__ = cls_name
    globals()[cls_name] = TestPool3DUseCeilCase


create_test_cudnn_use_ceil_class(TestPool3D_Op)
create_test_cudnn_use_ceil_class(TestCase1)


def create_test_use_ceil_class(parent):

    class TestPool3DUseCeilCase(parent):

        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "CeilModeCast")
    TestPool3DUseCeilCase.__name__ = cls_name
    globals()[cls_name] = TestPool3DUseCeilCase


create_test_use_ceil_class(TestCase1)
create_test_use_ceil_class(TestCase2)


class TestAvgInclude(TestCase2):

    def init_exclusive(self):
        self.exclusive = False


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNAvgInclude(TestCase2):

    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive(TestCase1):

    def init_adaptive(self):
        self.adaptive = True


class TestAvgPoolAdaptiveAsyOutSize(TestCase1):

    def init_adaptive(self):
        self.adaptive = True

    def init_shape(self):
        self.shape = [1, 3, 3, 4, 4]

    def init_test_case(self):
        self.ksize = [2, 2, 3]
        self.strides = [1, 1, 1]


# ------- test pool3d with asymmetric padding ------
class TestPool3D_Op_AsyPadding(TestPool3D_Op):

    def init_test_case(self):
        self.ksize = [3, 4, 3]
        self.strides = [1, 1, 2]

    def init_paddings(self):
        self.paddings = [0, 0, 0, 2, 3, 0]

    def init_shape(self):
        self.shape = [1, 3, 5, 5, 6]


class TestCase1_AsyPadding(TestCase1):

    def init_test_case(self):
        self.ksize = [3, 3, 4]
        self.strides = [1, 1, 2]

    def init_paddings(self):
        self.paddings = [1, 0, 2, 1, 2, 1]

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 6]


class TestCase2_AsyPadding(TestCase2):

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [1, 2, 1, 1, 1, 0]

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]


class TestCase3_AsyPadding(TestCase3):

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [1, 0, 0, 0, 1, 0]

    def init_shape(self):
        self.shape = [1, 3, 5, 5, 5]


class TestCase4_AsyPadding(TestCase4):

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [1, 0, 2, 1, 2, 1]

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]


class TestCase5_AsyPadding(TestCase5):

    def init_test_case(self):
        self.ksize = [3, 3, 3]
        self.strides = [1, 1, 1]

    def init_paddings(self):
        self.paddings = [1, 2, 1, 1, 1, 0]

    def init_shape(self):
        self.shape = [1, 3, 7, 7, 7]


create_test_cudnn_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_class(TestCase1_AsyPadding)
create_test_cudnn_class(TestCase2_AsyPadding)
create_test_cudnn_class(TestCase3_AsyPadding)
create_test_cudnn_class(TestCase4_AsyPadding)
create_test_cudnn_class(TestCase5_AsyPadding)

create_test_cudnn_fp16_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_fp16_class(TestCase1_AsyPadding)
create_test_cudnn_fp16_class(TestCase2_AsyPadding)
create_test_cudnn_fp16_class(TestCase3_AsyPadding)
create_test_cudnn_fp16_class(TestCase4_AsyPadding)
create_test_cudnn_fp16_class(TestCase5_AsyPadding)

create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding)

create_test_use_ceil_class(TestCase1_AsyPadding)
create_test_use_ceil_class(TestCase2_AsyPadding)


class TestAvgInclude_AsyPadding(TestCase2):

    def init_exclusive(self):
        self.exclusive = False

    def init_paddings(self):
        self.paddings = [2, 2, 1, 1, 0, 0]


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNAvgInclude_AsyPadding(TestCase2):

    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False

    def init_paddings(self):
        self.paddings = [1, 0, 0, 0, 0, 0]

    def init_shape(self):
        self.shape = [1, 3, 5, 5, 5]


class TestAvgPoolAdaptive_AsyPadding(TestCase1):

    def init_adaptive(self):
        self.adaptive = True

    def init_paddings(self):
        self.paddings = [1, 0, 2, 1, 2, 1]


# ------------ test channel_last --------------
class TestPool3D_channel_last(TestPool3D_Op):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 5, 6, 3]


class TestCase1_channel_last(TestCase1):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


class TestCase2_channel_last(TestCase2):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 5, 3]


class TestCase3_channel_last(TestCase3):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 5, 6, 5, 3]


class TestCase4_channel_last(TestCase4):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 6, 7, 3]


class TestCase5_channel_last(TestCase5):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


create_test_cudnn_class(TestPool3D_channel_last)
create_test_cudnn_class(TestCase1_channel_last)
create_test_cudnn_class(TestCase2_channel_last)
create_test_cudnn_class(TestCase3_channel_last)
create_test_cudnn_class(TestCase4_channel_last)
create_test_cudnn_class(TestCase5_channel_last)

create_test_cudnn_use_ceil_class(TestPool3D_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_channel_last)

create_test_use_ceil_class(TestCase1_channel_last)
create_test_use_ceil_class(TestCase2_channel_last)


class TestCase5_Max(TestCase2):

    def init_pool_type(self):
        self.pool_type = "max"

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn() and self.pool_type == "max":
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place,
                                       set(['X']),
                                       'Out',
                                       max_relative_error=1.00)
        elif self.pool_type == "max":
            self.check_grad(set(['X']), 'Out', max_relative_error=1.00)


class TestCase5_channel_last_Max(TestCase5_Max):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


create_test_cudnn_class(TestCase5_Max)
create_test_cudnn_class(TestCase5_channel_last_Max)


class TestAvgInclude_channel_last(TestCase2_channel_last):

    def init_exclusive(self):
        self.exclusive = False


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last):

    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last):

    def init_adaptive(self):
        self.adaptive = True


# --- asy padding
class TestPool3D_Op_AsyPadding_channel_last(TestPool3D_Op_AsyPadding):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
804
        self.shape = [1, 5, 5, 6, 3]
805 806 807


class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
808

809 810 811 812
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
813
        self.shape = [1, 7, 6, 8, 3]
814 815 816


class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
817

818 819 820 821
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
822
        self.shape = [1, 6, 8, 7, 3]
823 824 825


class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
826

827 828 829 830
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
831
        self.shape = [1, 5, 7, 5, 3]
832 833 834


class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
835

836 837 838 839
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
840
        self.shape = [1, 6, 7, 7, 3]
841 842 843


class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
844

845 846 847 848
    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
849
        self.shape = [1, 7, 8, 6, 3]
850 851


C
cnn 已提交
852
create_test_cudnn_class(TestPool3D_Op_AsyPadding_channel_last)
853 854 855 856 857 858
create_test_cudnn_class(TestCase1_AsyPadding_channel_last)
create_test_cudnn_class(TestCase2_AsyPadding_channel_last)
create_test_cudnn_class(TestCase3_AsyPadding_channel_last)
create_test_cudnn_class(TestCase4_AsyPadding_channel_last)
create_test_cudnn_class(TestCase5_AsyPadding_channel_last)

create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding_channel_last)

create_test_use_ceil_class(TestCase1_AsyPadding_channel_last)
create_test_use_ceil_class(TestCase2_AsyPadding_channel_last)


class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):

    def init_data_format(self):
        self.data_format = "NDHWC"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNAvgInclude_AsyPadding_channel_last(
        TestCUDNNAvgInclude_AsyPadding):

    def init_data_format(self):
        self.data_format = "NDHWC"


class TestAvgPoolAdaptive_AsyPadding_channel_last(
        TestAvgPoolAdaptive_AsyPadding):

    def init_data_format(self):
        self.data_format = "NDHWC"

    def init_shape(self):
        self.shape = [1, 7, 7, 7, 3]


# test padding = SAME VALID
def create_test_padding_SAME_class(parent):

    class TestPaddingSAMECase(parent):

        def init_paddings(self):
            self.paddings = [0, 0, 0]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase


create_test_padding_SAME_class(TestPool3D_Op)
create_test_padding_SAME_class(TestCase1)
create_test_padding_SAME_class(TestCase2)
create_test_padding_SAME_class(TestCase3)
create_test_padding_SAME_class(TestCase4)
create_test_padding_SAME_class(TestCase5)

create_test_padding_SAME_class(TestPool3D_channel_last)
create_test_padding_SAME_class(TestCase1_channel_last)
create_test_padding_SAME_class(TestCase2_channel_last)
create_test_padding_SAME_class(TestCase3_channel_last)
create_test_padding_SAME_class(TestCase4_channel_last)
create_test_padding_SAME_class(TestCase5_channel_last)


def create_test_cudnn_padding_SAME_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNPaddingSAMECase(parent):

        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
    TestCUDNNPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingSAMECase


create_test_cudnn_padding_SAME_class(TestPool3D_Op)
create_test_cudnn_padding_SAME_class(TestCase1)
create_test_cudnn_padding_SAME_class(TestCase2)
create_test_cudnn_padding_SAME_class(TestCase3)
create_test_cudnn_padding_SAME_class(TestCase4)
create_test_cudnn_padding_SAME_class(TestCase5)

create_test_cudnn_padding_SAME_class(TestPool3D_channel_last)
create_test_cudnn_padding_SAME_class(TestCase1_channel_last)
create_test_cudnn_padding_SAME_class(TestCase2_channel_last)
create_test_cudnn_padding_SAME_class(TestCase3_channel_last)
create_test_cudnn_padding_SAME_class(TestCase4_channel_last)
create_test_cudnn_padding_SAME_class(TestCase5_channel_last)


def create_test_padding_VALID_class(parent):

    class TestPaddingVALIDCase(parent):

        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
    TestPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestPaddingVALIDCase


create_test_padding_VALID_class(TestPool3D_Op)
create_test_padding_VALID_class(TestCase1)
create_test_padding_VALID_class(TestCase2)
create_test_padding_VALID_class(TestCase3)
create_test_padding_VALID_class(TestCase4)
create_test_padding_VALID_class(TestCase5)

create_test_padding_VALID_class(TestPool3D_channel_last)
create_test_padding_VALID_class(TestCase1_channel_last)
create_test_padding_VALID_class(TestCase2_channel_last)
create_test_padding_VALID_class(TestCase3_channel_last)
create_test_padding_VALID_class(TestCase4_channel_last)
create_test_padding_VALID_class(TestCase5_channel_last)


def create_test_cudnn_padding_VALID_class(parent):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNPaddingVALIDCase(parent):

        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
    TestCUDNNPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingVALIDCase


create_test_cudnn_padding_VALID_class(TestPool3D_Op)
create_test_cudnn_padding_VALID_class(TestCase1)
create_test_cudnn_padding_VALID_class(TestCase2)
create_test_cudnn_padding_VALID_class(TestCase3)
create_test_cudnn_padding_VALID_class(TestCase4)
create_test_cudnn_padding_VALID_class(TestCase5)

create_test_cudnn_padding_VALID_class(TestPool3D_channel_last)
create_test_cudnn_padding_VALID_class(TestCase1_channel_last)
create_test_cudnn_padding_VALID_class(TestCase2_channel_last)
create_test_cudnn_padding_VALID_class(TestCase3_channel_last)
create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
create_test_cudnn_padding_VALID_class(TestCase5_channel_last)


# test API
class TestPool3DAPI(unittest.TestCase):

    def test_api(self):
        x_NDHWC = np.random.random([2, 5, 5, 5, 3]).astype("float32")
        x_NCDHW = np.random.random([2, 3, 5, 5, 5]).astype("float32")

        input_NDHWC = fluid.layers.data(name="input_NDHWC",
                                        shape=[2, 5, 5, 5, 3],
                                        append_batch_size=False,
                                        dtype="float32")

        input_NCDHW = fluid.layers.data(name="input_NCDHW",
                                        shape=[2, 3, 5, 5, 5],
                                        append_batch_size=False,
                                        dtype="float32")

        ksize = [3, 3, 3]
        out_1 = fluid.layers.pool3d(input=input_NDHWC,
                                    pool_size=ksize,
                                    pool_type="max",
                                    pool_padding=[1, 1, 1],
                                    use_cudnn=False,
                                    data_format="NDHWC")

        out_2 = fluid.layers.pool3d(input=input_NDHWC,
                                    pool_size=ksize,
                                    pool_type="avg",
                                    pool_padding=[[0, 0], [1, 1], [1, 1],
                                                  [1, 1], [0, 0]],
                                    use_cudnn=False,
                                    data_format="NDHWC")

        out_3 = fluid.layers.pool3d(input=input_NCDHW,
                                    pool_size=ksize,
                                    pool_type="avg",
                                    pool_padding=[[0, 0], [0, 0], [1, 1],
                                                  [1, 1], [1, 1]],
                                    use_cudnn=False,
                                    data_format="NCDHW")

        out_4 = fluid.layers.pool3d(input=input_NCDHW,
                                    pool_size=ksize,
                                    pool_type="avg",
                                    pool_padding=[1, 2, 1, 0, 0, 1],
                                    use_cudnn=False,
                                    data_format="NCDHW")
        # test VALID
        out_5 = fluid.layers.pool3d(input=input_NDHWC,
                                    pool_size=ksize,
                                    pool_type="avg",
                                    pool_padding="VALID",
                                    use_cudnn=False,
                                    data_format="NDHWC")

        out_6 = fluid.layers.pool3d(input=input_NCDHW,
                                    pool_size=ksize,
                                    pool_type="avg",
                                    pool_padding="VALID",
                                    use_cudnn=False,
                                    data_format="NCDHW")

        # test SAME
        out_7 = fluid.layers.pool3d(input=input_NDHWC,
                                    pool_size=ksize,
                                    pool_stride=[1, 1, 2],
                                    pool_type="avg",
                                    pool_padding="SAME",
                                    use_cudnn=False,
                                    data_format="NDHWC")

        out_8 = fluid.layers.pool3d(input=input_NCDHW,
                                    pool_size=[4, 4, 4],
                                    pool_type="avg",
                                    pool_padding="SAME",
                                    use_cudnn=False,
                                    data_format="NCDHW")

        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run(
            fluid.default_main_program(),
            feed={
                "input_NDHWC": x_NDHWC,
                "input_NCDHW": x_NCDHW
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8])

        assert np.allclose(
            res_1,
            pool3D_forward_naive(x=x_NDHWC,
                                 ksize=ksize,
                                 pool_type="max",
                                 strides=[1, 1, 1],
                                 paddings=[1, 1, 1],
                                 data_format="NDHWC"))

        assert np.allclose(
            res_2,
            pool3D_forward_naive(x=x_NDHWC,
                                 ksize=ksize,
                                 pool_type="avg",
                                 strides=[1, 1, 1],
                                 paddings=[1, 1, 1, 1, 1, 1],
                                 data_format="NDHWC"))
        assert np.allclose(res_3,
                           pool3D_forward_naive(x=x_NCDHW,
                                                ksize=ksize,
                                                pool_type="avg",
                                                strides=[1, 1, 1],
                                                paddings=[1, 1, 1, 1, 1, 1],
                                                data_format="NCDHW"),
                           rtol=0.07,
                           atol=1e-05)

        assert np.allclose(res_4,
                           pool3D_forward_naive(x=x_NCDHW,
                                                ksize=ksize,
                                                pool_type="avg",
                                                strides=[1, 1, 1],
                                                paddings=[1, 2, 1, 0, 0, 1],
                                                data_format="NCDHW"),
                           rtol=0.07,
                           atol=1e-05)
        # VALID
        assert np.allclose(
            res_5,
            pool3D_forward_naive(x=x_NDHWC,
                                 ksize=ksize,
                                 pool_type="avg",
                                 strides=[1, 1, 1],
                                 paddings=[10, 20],
                                 padding_algorithm="VALID",
                                 data_format="NDHWC"))

        assert np.allclose(res_6,
                           pool3D_forward_naive(x=x_NCDHW,
                                                ksize=ksize,
                                                pool_type="avg",
                                                strides=[1, 1, 1],
                                                paddings=[10, 20],
                                                padding_algorithm="VALID",
                                                data_format="NCDHW"),
                           rtol=0.07,
                           atol=1e-05)
        # SAME
        assert np.allclose(
            res_7,
            pool3D_forward_naive(x=x_NDHWC,
                                 ksize=ksize,
                                 pool_type="avg",
                                 strides=[1, 1, 2],
                                 paddings=[10, 20],
                                 padding_algorithm="SAME",
                                 data_format="NDHWC"))

        assert np.allclose(res_8,
                           pool3D_forward_naive(x=x_NCDHW,
                                                ksize=[4, 4, 4],
                                                pool_type="avg",
                                                strides=[1, 1, 1],
                                                paddings=[10, 20],
                                                padding_algorithm="SAME",
                                                data_format="NCDHW"),
                           rtol=0.07,
                           atol=1e-05)


class TestPool3DAPI_Error(unittest.TestCase):

    def test_api(self):
        input_NDHWC = fluid.layers.data(name="input_NDHWC",
                                        shape=[2, 5, 5, 5, 3],
                                        append_batch_size=False,
                                        dtype="float32")
        ksize = [3, 3, 3]

        # cudnn type error
        def run_1():
            out_1 = fluid.layers.pool3d(input=input_NDHWC,
                                        pool_size=ksize,
                                        pool_type="max",
                                        pool_padding=[1, 1, 1],
                                        use_cudnn=[0],
                                        data_format="NDHWC")

        self.assertRaises(TypeError, run_1)

        # data_format value error
        def run_2():
            out_2 = fluid.layers.pool3d(input=input_NDHWC,
                                        pool_size=ksize,
                                        pool_type="max",
                                        pool_padding=[1, 1, 1],
                                        use_cudnn=False,
                                        data_format="NDHWCC")

        self.assertRaises(ValueError, run_2)

        # padding str value error
        def run_3():
            out_3 = fluid.layers.pool3d(input=input_NDHWC,
                                        pool_size=ksize,
                                        pool_type="max",
                                        pool_padding="VALIDSAME",
                                        use_cudnn=False,
                                        data_format="NDHWC")

        self.assertRaises(ValueError, run_3)

        # padding str valid and ceil_mode value error
        def run_4():
            out_4 = fluid.layers.pool3d(input=input_NDHWC,
                                        pool_size=ksize,
                                        pool_type="max",
                                        pool_padding="VALID",
                                        use_cudnn=False,
                                        ceil_mode=True,
                                        data_format="NDHWC")

        self.assertRaises(ValueError, run_4)

        # non-zero padding on batch/channel dims. value error
        def run_5():
            out_5 = fluid.layers.pool3d(input=input_NDHWC,
                                        pool_size=ksize,
                                        pool_type="max",
                                        pool_padding=[[1, 1], [0, 0], [0, 0],
                                                      [1, 1], [1, 1]],
                                        use_cudnn=False,
                                        data_format="NDHWC")

        self.assertRaises(ValueError, run_5)


if __name__ == '__main__':
    unittest.main()