#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.tests.unittests.op_test import (
    OpTest,
    convert_float_to_uint16,
    get_numeric_gradient,
)
from paddle.fluid.tests.unittests.testsuite import create_op


def conv2d_forward_naive(
    input,
    filter,
    group,
    conv_param,
    padding_algorithm='EXPLICIT',
    data_format='NCHW',
):
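    # Naive reference conv2d used to generate the expected outputs for the op
    # tests; supports groups, dilation, SAME/VALID/EXPLICIT padding and both
    # NCHW and NHWC layouts.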
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError(
            "Unknown Attr(padding_algorithm): '%s'. "
            "It can only be 'SAME' or 'VALID'." % str(padding_algorithm)
        )

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Unknown Attr(data_format): '%s' ."
            "It can only be 'NCHW' or 'NHWC'." % str(data_format)
        )

    channel_last = data_format == "NHWC"
    if channel_last:
        input = np.transpose(input, [0, 3, 1, 2])

    in_n, in_c, in_h, in_w = input.shape
    f_n, f_c, f_h, f_w = filter.shape
    out_n = in_n
    out_c = f_n
    assert f_c * group == in_c
    assert np.mod(out_c, group) == 0
    sub_out_c = out_c // group
    sub_f_n = f_n // group

    stride, pad, dilation = (
        conv_param['stride'],
        conv_param['pad'],
        conv_param['dilation'],
    )

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
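        # SAME padding: choose total padding so that out_size == ceil(in_size / stride).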
        padding = []
        for input_size, filter_size, stride_size in zip(
            input_shape, pool_size, pool_stride
        ):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0)
            )
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    ksize = filter.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilation = [1, 1]
        input_data_shape = input.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]
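    # Output spatial size: out = (in + pad_0 + pad_1 - dilated_kernel) // stride + 1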
    out_h = (
        1
        + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * (f_h - 1) + 1))
        // stride[0]
    )
    out_w = (
        1
        + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * (f_w - 1) + 1))
        // stride[1]
    )
    out = np.zeros((out_n, out_c, out_h, out_w))

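    # Extent of the kernel after dilation is applied.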
    d_block_h = dilation[0] * (f_h - 1) + 1
    d_block_w = dilation[1] * (f_w - 1) + 1

    input_pad = np.pad(
        input,
        ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)),
        mode='constant',
        constant_values=0,
    )

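    # Embed the original filter into a zero tensor so that the dilated
    # convolution can be computed as a plain convolution with the enlarged kernel.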
    filter_dilation = np.zeros((f_n, f_c, d_block_h, d_block_w))
    filter_dilation[
        :, :, 0 : d_block_h : dilation[0], 0 : d_block_w : dilation[1]
    ] = filter

    for i in range(out_h):
        for j in range(out_w):
            for g in range(group):
                input_pad_masked = input_pad[
                    :,
                    g * f_c : (g + 1) * f_c,
                    i * stride[0] : i * stride[0] + d_block_h,
                    j * stride[1] : j * stride[1] + d_block_w,
                ]

                f_sub = filter_dilation[
                    g * sub_f_n : (g + 1) * sub_f_n, :, :, :
                ]
                # sub_f_n == sub_out_c
                for k in range(sub_out_c):
                    # Multiply corresponding elements, then sum over channel and spatial dims
                    out[:, g * sub_out_c + k, i, j] = np.sum(
                        input_pad_masked * f_sub[k, :, :, :], axis=(1, 2, 3)
                    )

    if channel_last:
        out = np.transpose(out, [0, 2, 3, 1])

    return out, in_n, out_h, out_w, out_c


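# Each create_test_*_class helper below derives a new TestCase subclass from
# the given parent (switching on cuDNN, dtype, layout or padding mode) and
# registers it in globals() so that unittest discovers it automatically.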
def create_test_cudnn_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = (
                np.float32 if core.is_compiled_with_rocm() else np.float64
            )

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
    TestCUDNNCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNCase


def create_test_cudnn_fp16_class(parent, grad_check=True):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestConv2DCUDNNFp16(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=2e-2)

        def test_check_grad_no_filter(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
                )

        def test_check_grad_no_input(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
                )

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
    TestConv2DCUDNNFp16.__name__ = cls_name
    globals()[cls_name] = TestConv2DCUDNNFp16


def create_test_cudnn_bf16_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA and do not support bfloat16",
    )
    class TestConv2DCUDNNBF16(parent):
        def get_numeric_grad(self, place, check_name):
            scope = core.Scope()
            self._check_grad_helper()
            op = create_op(
                scope, self.op_type, self.inputs, self.outputs, self.attrs
            )
            return get_numeric_gradient(
                place, scope, op, self.inputs_fp32, check_name, ['Output']
            )

        def init_kernel_type(self):
            self.use_cudnn = True
            self.no_need_check_grad = True
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-2)

        def test_check_grad_no_filter(self):
            place = core.CUDAPlace(0)
            numeric_grads = self.get_numeric_grad(place, 'Input')
            self.check_grad_with_place(
                place,
                ['Input'],
                'Output',
                no_grad_set=set(['Filter']),
                user_defined_grads=[numeric_grads],
            )

        def test_check_grad_no_input(self):
            place = core.CUDAPlace(0)
            numeric_grads = self.get_numeric_grad(place, 'Filter')
            self.check_grad_with_place(
                place,
                ['Filter'],
                'Output',
                no_grad_set=set(['Input']),
                user_defined_grads=[numeric_grads],
            )

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNBF16")
    TestConv2DCUDNNBF16.__name__ = cls_name
    globals()[cls_name] = TestConv2DCUDNNBF16


def create_test_channel_last_class(parent):
    class TestChannelLastCase(parent):
        def init_data_format(self):
            self.data_format = "NHWC"

        def init_test_case_2(self):
            N, C, H, W = self.input_size
            self.input_size = [N, H, W, C]

    cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
    TestChannelLastCase.__name__ = cls_name
    globals()[cls_name] = TestChannelLastCase


def create_test_cudnn_channel_last_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCudnnChannelLastCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = (
                np.float32 if core.is_compiled_with_rocm() else np.float64
            )

        def init_data_format(self):
            self.data_format = "NHWC"

        def init_test_case_2(self):
            N, C, H, W = self.input_size
            self.input_size = [N, H, W, C]

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
    TestCudnnChannelLastCase.__name__ = cls_name
    globals()[cls_name] = TestCudnnChannelLastCase


def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCudnnChannelLastFp16(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=2e-2)

        def test_check_grad_no_filter(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
                )

        def test_check_grad_no_input(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
                )

        def init_data_format(self):
            self.data_format = "NHWC"

        def init_test_case_2(self):
            N, C, H, W = self.input_size
            self.input_size = [N, H, W, C]

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLastFp16")
    TestCudnnChannelLastFp16.__name__ = cls_name
    globals()[cls_name] = TestCudnnChannelLastFp16


def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.pad = [0, 0]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase


def create_test_padding_VALID_class(parent):
    class TestPaddingVALIDCase(parent):
        def init_paddings(self):
            self.pad = [1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
    TestPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestPaddingVALIDCase


def create_test_cudnn_padding_SAME_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNPaddingSAMECase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = (
                np.float32 if core.is_compiled_with_rocm() else np.float64
            )

        def init_paddings(self):
            self.pad = [1, 1]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
    TestCUDNNPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingSAMECase


def create_test_cudnn_padding_VALID_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestCUDNNPaddingVALIDCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = (
                np.float32 if core.is_compiled_with_rocm() else np.float64
            )

        def init_paddings(self):
            self.pad = [1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
    TestCUDNNPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingVALIDCase


class TestConv2DOp(OpTest):
    def setUp(self):
        self.op_type = "conv2d"
        self.use_cudnn = False
        self.exhaustive_search = False
        self.use_cuda = False
        self.use_mkldnn = False
        self.fuse_relu_before_depthwise_conv = False
        self.data_format = "AnyLayout"
        self.dtype = np.float64
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_test_case()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations,
        }

        if self.is_bfloat16_op():
            input = np.random.random(self.input_size).astype(np.float32)
            filter = np.random.uniform(-1, 1, self.filter_size).astype(
                np.float32
            )
        else:
            input = np.random.random(self.input_size).astype(self.dtype)
            filter = np.random.uniform(-1, 1, self.filter_size).astype(
                self.dtype
            )

        if not self.has_cuda():
            self.fuse_relu_before_depthwise_conv = False
        if self.fuse_relu_before_depthwise_conv:
            input = input - 0.5
            input -= (input < 0) * 0.1
            input += (input >= 0) * 0.1
            input2 = np.maximum(input, 0.0)
        else:
            input2 = input

        output, _, _, _, _ = conv2d_forward_naive(
            input2, filter, self.groups, conv2d_param
        )

        if self.is_bfloat16_op():
            output = output.astype(np.float32)
            self.inputs = {
                'Input': convert_float_to_uint16(input),
                'Filter': convert_float_to_uint16(filter),
            }
            self.inputs_fp32 = {
                'Input': OpTest.np_dtype_to_fluid_dtype(input),
                'Filter': OpTest.np_dtype_to_fluid_dtype(filter),
            }
        else:
            output = output.astype(self.dtype)
            self.inputs = {
                'Input': OpTest.np_dtype_to_fluid_dtype(input),
                'Filter': OpTest.np_dtype_to_fluid_dtype(filter),
            }

        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format,
            'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv,
            'exhaustive_search': self.exhaustive_search,
        }
        self.outputs = {'Output': output}

    def has_cuda(self):
        return core.is_compiled_with_cuda() and (
            self.use_cudnn or self.use_cuda
        )

    def test_check_output(self):
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output_with_place(
            place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
        )

    def test_check_grad(self):
        if self.dtype == np.float16 or (
            hasattr(self, "no_need_check_grad") and self.no_need_check_grad
        ):
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad_with_place(
            place,
            {'Input', 'Filter'},
            'Output',
            max_relative_error=0.02,
            check_dygraph=(not self.use_mkldnn),
        )

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16 or (
            hasattr(self, "no_need_check_grad") and self.no_need_check_grad
        ):
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad_with_place(
            place,
            ['Input'],
            'Output',
            max_relative_error=0.02,
            no_grad_set=set(['Filter']),
            check_dygraph=(not self.use_mkldnn),
        )

    def test_check_grad_no_input(self):
        if self.dtype == np.float16 or (
            hasattr(self, "no_need_check_grad") and self.no_need_check_grad
        ):
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad_with_place(
            place,
            ['Filter'],
            'Output',
            no_grad_set=set(['Input']),
            check_dygraph=(not self.use_mkldnn),
        )

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_test_case_2(self):
        pass

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass


class TestWithPad(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithStride(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithGroup(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [18, f_c, 3, 3]


class TestWith1x1(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithDepthWise3x3(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [3, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [12, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 4


class TestWithDepthWise5x5(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [8, f_c, 5, 5]

    def init_group(self):
        self.groups = 4


class TestWithDepthWise7x7(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 8, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 7, 7]

    def init_group(self):
        self.groups = 8


class TestWithDilation(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [12, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 3


class TestWithInput1x1Filter1x1(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [100, 3, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


# ----------------Conv2DCUDNN----------------

create_test_cudnn_class(TestConv2DOp)
create_test_cudnn_class(TestWithPad)
create_test_cudnn_class(TestWithStride)
create_test_cudnn_class(TestWithGroup)
create_test_cudnn_class(TestWith1x1)
create_test_cudnn_class(TestWithInput1x1Filter1x1)

# ----------------Conv2DCUDNN fp16----------------

create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False)
create_test_cudnn_fp16_class(TestWithPad, grad_check=False)
create_test_cudnn_fp16_class(TestWithStride, grad_check=False)
create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
create_test_cudnn_fp16_class(TestWith1x1, grad_check=False)
create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)

# ----------------Conv2DCUDNN bf16----------------

create_test_cudnn_bf16_class(TestConv2DOp)
create_test_cudnn_bf16_class(TestWithPad)
create_test_cudnn_bf16_class(TestWithStride)
create_test_cudnn_bf16_class(TestWithGroup)
create_test_cudnn_bf16_class(TestWith1x1)
create_test_cudnn_bf16_class(TestWithInput1x1Filter1x1)


class TestCUDNNExhaustiveSearch(TestConv2DOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.exhaustive_search = True
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64


class TestConv2DOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # the input of conv2d must be Variable.
                x1 = fluid.create_lod_tensor(
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
                )
                fluid.layers.conv2d(x1, 1, 1)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                # the input dtype of conv2d must be float16 or float32 or float64
                # float16 only can be set on GPU place
                x2 = fluid.layers.data(
                    name='x2', shape=[3, 4, 5, 6], dtype="int32"
                )
                fluid.layers.conv2d(x2, 1, 1)

            self.assertRaises(TypeError, test_dtype)


# Please don't remove the following code.
# Currently, CI uses cuDNN V5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_op_type(self):
#         self.op_type = "conv_cudnn"

# ---- test asymmetric padding ----


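# Same structure as TestConv2DOp, but additionally exercises the
# padding_algorithm, data_format and asymmetric padding attributes through
# the init_paddings / init_data_format / init_test_case_2 hooks.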
class TestConv2DOp_v2(OpTest):
    def setUp(self):
        self.op_type = "conv2d"
        self.use_cudnn = False
        self.exhaustive_search = False
        self.use_cuda = False
        self.use_mkldnn = False
        self.fuse_relu_before_depthwise_conv = False
        self.dtype = np.float64
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_data_format()
        self.init_test_case()
        self.init_paddings()
        self.init_test_case_2()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations,
        }

        input = np.random.random(self.input_size).astype(self.dtype)
        if not self.has_cuda():
            self.fuse_relu_before_depthwise_conv = False
        if self.fuse_relu_before_depthwise_conv:
            input = input - 0.5
            input -= (input < 0) * 0.1
            input += (input >= 0) * 0.1
            input2 = np.maximum(input, 0.0)
        else:
            input2 = input
        filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)
        output, _, _, _, _ = conv2d_forward_naive(
            input2,
            filter,
            self.groups,
            conv2d_param,
            self.padding_algorithm,
            self.data_format,
        )
        output = output.astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter),
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format,
            'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv,
            'exhaustive_search': self.exhaustive_search,
        }
        self.outputs = {'Output': output}

    def has_cuda(self):
        return core.is_compiled_with_cuda() and (
            self.use_cudnn or self.use_cuda
        )

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_output_with_place(
            place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
        )

    def test_check_grad(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place,
            {'Input', 'Filter'},
            'Output',
            max_relative_error=0.02,
            check_dygraph=(not self.use_mkldnn),
        )

    def test_check_grad_no_filter(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place,
            ['Input'],
            'Output',
            max_relative_error=0.02,
            no_grad_set=set(['Filter']),
            check_dygraph=(not self.use_mkldnn),
        )

    def test_check_grad_no_input(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place,
            ['Filter'],
            'Output',
            no_grad_set=set(['Input']),
            check_dygraph=(not self.use_mkldnn),
        )

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 4, 3]

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass

    def init_paddings(self):
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"

    def init_data_format(self):
        self.data_format = "NCHW"

    def init_test_case_2(self):
        pass


class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
    def init_paddings(self):
        self.pad = [0, 0, 1, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithPad_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_paddings(self):
        self.pad = [2, 1, 3, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithStride_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_paddings(self):
        self.pad = [2, 1, 3, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithGroup_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [24, f_c, 4, 3]


class TestWith1x1_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 3

    def init_paddings(self):
        self.pad = [2, 2, 4, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [3, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 4

    def init_paddings(self):
        self.pad = [1, 3, 2, 1]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [8, f_c, 5, 5]

    def init_group(self):
        self.groups = 4

    def init_paddings(self):
        self.pad = [0, 1, 1, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [2, 2]
        self.input_size = [2, 8, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 7, 7]

    def init_group(self):
        self.groups = 8

    def init_paddings(self):
        self.pad = [1, 3, 4, 1]
        self.padding_algorithm = "EXPLICIT"


class TestWithDilation_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [24, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 3

    def init_paddings(self):
        self.pad = [0, 1, 3, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [40, 3, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 3

    def init_paddings(self):
        self.pad = [0, 3, 4, 0]
        self.padding_algorithm = "EXPLICIT"


create_test_cudnn_class(TestConv2DOp_AsyPadding)
create_test_cudnn_class(TestWithPad_AsyPadding)
create_test_cudnn_class(TestWithStride_AsyPadding)
create_test_cudnn_class(TestWithGroup_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding)

# ---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_cudnn_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_cudnn_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)

# ------------ test channel last ---------
create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithPad_AsyPadding)
create_test_cudnn_channel_last_class(TestWithStride_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding)
create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding)

create_test_cudnn_channel_last_fp16_class(
    TestConv2DOp_AsyPadding, grad_check=False
)
create_test_cudnn_channel_last_fp16_class(
    TestWithPad_AsyPadding, grad_check=False
)
create_test_cudnn_channel_last_fp16_class(
    TestWithStride_AsyPadding, grad_check=False
)
create_test_cudnn_channel_last_fp16_class(
    TestWithGroup_AsyPadding, grad_check=False
)
create_test_cudnn_channel_last_fp16_class(
    TestWithDilation_AsyPadding, grad_check=False
)

if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()