#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import numpy as np

import paddle
import paddle.nn as nn
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from test_attribute_var import UnittestBase
from op_test import OpTest

paddle.enable_static()


def conv2dtranspose_forward_naive(input_, filter_, attrs):
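    """Reference (naive) NumPy implementation of conv2d_transpose.

    input_ is NCHW (NHWC inputs are transposed first) and filter_ is laid
    out as (in_c, out_c // groups, f_h, f_w). Each input pixel is scattered
    over a (possibly dilated) kernel window, then the declared padding is
    cropped off. E.g. input (2, 3, 5, 5) with filter (3, 6, 3, 3), stride 1
    and no padding yields output (2, 6, 7, 7).
    """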
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError(
            "Unknown Attr(padding_algorithm): '%s'. "
            "It can only be 'SAME', 'VALID' or 'EXPLICIT'."
            % str(padding_algorithm)
        )

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 3, 1, 2])
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups

    stride, pad, dilations = (
        attrs['strides'],
        attrs['paddings'],
        attrs['dilations'],
    )

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
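        # SAME keeps out = ceil(in / stride); the total pad per dim is
        # max((out - 1) * stride + filter - in, 0), split so any odd extra
        # pixel goes to the bottom/right side.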
        padding = []
        for input_size, filter_size, stride_size in zip(
            input_shape, kernel_size, kernel_stride
        ):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0)
            )
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    ksize = filter_.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1]
        input_data_shape = input_.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]

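    # Effective (dilated) kernel extent: d_block = dilation * (f - 1) + 1.
    # Before cropping, the transposed conv covers (in - 1) * stride + d_block
    # pixels per dim (e.g. in=5, stride=2, f=3, dilation=1 -> 11).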
    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
    if 'output_size' in attrs:
        output_size = attrs['output_size']
        out_h = output_size[0] + pad_h_0 + pad_h_1
        out_w = output_size[1] + pad_w_0 + pad_w_1
    out_pad_h = 0
    out_pad_w = 0
    if 'output_padding' in attrs:
        out_pad_h = attrs['output_padding'][0]
        out_pad_w = attrs['output_padding'][1]
    out = np.zeros(
        (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype
    )

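    # Scatter-add: each input pixel (i, j) contributes input * filter into
    # the output window starting at (i * stride, j * stride), with taps
    # spaced by the dilation.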
    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[
                        n, g * sub_in_c : (g + 1) * sub_in_c, i, j
                    ]  # (c)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked
                            * filter_[
                                g * sub_in_c : (g + 1) * sub_in_c, k, :, :
                            ],
                            axis=0,
                        )
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[
                            n,
                            g * f_out_c + k,
                            i1 : i2 : dilations[0],
                            j1 : j2 : dilations[1],
                        ] += tmp_out

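    # Crop the declared padding from the borders; output_padding widens only
    # the bottom/right edge.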
    out = out[
        :,
        :,
        pad_h_0 : out_h - pad_h_1 + out_pad_h,
        pad_w_0 : out_w - pad_w_1 + out_pad_w,
    ]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 1])
    return out


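# The OpTest harness: setUp builds random Input/Filter tensors, runs the
# conv2d_transpose op and checks its output and gradients against the naive
# reference above. Subclasses only override init_test_case / init_op_type
# to vary shapes, padding, dilation, groups, layout and the cuDNN backend.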
class TestConv2DTransposeOp(OpTest):
    def setUp(self):
        # init as conv transpose
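        # fp64 gives the tightest numerical comparison; ROCm builds run in fp32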
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.need_check_grad = True
        self.is_test = False
        self.use_cudnn = False
        self.use_mkldnn = False
        self.output_size = None
        self.output_padding = []
        self.data_format = "NCHW"
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype(self.dtype)
        filter_ = np.random.random(self.filter_size).astype(self.dtype)

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format,
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(
            input_, filter_, self.attrs
        ).astype(self.dtype)

        self.outputs = {'Output': output}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
            )
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))

    def test_check_grad_no_input(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    ['Filter'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Input']),
                )
            else:
                self.check_grad(
                    ['Filter'], 'Output', no_grad_set=set(['Input'])
                )

    def test_check_grad_no_filter(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
                )
            else:
                self.check_grad(
                    ['Input'], 'Output', no_grad_set=set(['Filter'])
                )

    def test_check_grad(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    set(['Input', 'Filter']),
                    'Output',
                    max_relative_error=0.02,
                )
            else:
                self.check_grad(
                    set(['Input', 'Filter']), 'Output', max_relative_error=0.02
                )

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.op_type = "conv2d_transpose"


class TestWithSymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithAsymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithSAMEPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [2, 1]
        self.dilations = [1, 2]
        self.groups = 1
        self.input_size = [2, 3, 6, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 4, 3]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'

class TestWithGroups(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]


class TestWithStride(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithDilation(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithEvenUpsample(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
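        # With in=7, stride=2, f=5, pad=2 the default output is
        # (7 - 1) * 2 + 5 - 2 * 2 = 13; an explicit output_size may sit
        # anywhere in [13, 13 + stride), so 14 exercises the upper bound.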
        self.output_size = [14, 14]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
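        # output_padding appends rows/cols on the bottom/right only:
        # the result grows from 13 to 14 per spatial dim here.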
        self.output_padding = [1, 1]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class Test_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithGroups_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


class TestWithStride_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithDilation_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN(TestConv2DTransposeOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'SAME'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithVALIDPad(TestWithVALIDPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1]
#         self.stride = [2, 2]
#         self.dilations = [2, 2]
#         self.input_size = [2, 3, 5, 5]  # NCHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride_NHWC(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups_NHWC(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_FP16(TestConv2DTransposeOp):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.need_check_grad = False
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=0.02, check_dygraph=(not self.use_mkldnn)
            )
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestConv2DTransposeAPI(unittest.TestCase):
    def test_case1(self):
        data1 = fluid.layers.data(
            name='data1', shape=[3, 5, 5], dtype='float32'
        )
        data2 = fluid.layers.data(
            name='data2', shape=[5, 5, 3], dtype='float32'
        )
        out1 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NCHW',
        )
        out2 = paddle.static.nn.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NHWC',
        )
        out3 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            data_format='NHWC',
        )
        out4 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=3,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [0, 0], [2, 1], [0, 0]],
            data_format='NCHW',
        )
        out5 = paddle.static.nn.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='SAME',
            data_format='NCHW',
        )
        out6 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='VALID',
            data_format='NHWC',
        )
        out7 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            output_size=[7, 7],
            padding=[0, 0],
            data_format='NHWC',
        )

        data1_np = np.random.random((2, 3, 5, 5)).astype("float32")
        data2_np = np.random.random((2, 5, 5, 3)).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(
            fluid.default_main_program(),
            feed={"data1": data1_np, "data2": data2_np},
            fetch_list=[out1, out2, out3, out4, out5, out6, out7],
            return_numpy=True,
        )
        for res in results:
            self.assertIsNotNone(res)


class TestConv2DTransposeOpException(unittest.TestCase):
    def test_exception(self):
        data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")

        def attr_data_format():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                data_format="NCDHW",
            )

        self.assertRaises(ValueError, attr_data_format)

        def attr_padding_str():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding='Vald',
            )

        self.assertRaises(ValueError, attr_padding_str)

        def attr_padding_list():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [1, 1], [0, 0], [0, 0]],
            )

        self.assertRaises(ValueError, attr_padding_list)

        def attr_padding_with_data_format():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                data_format='NHWC',
            )

        self.assertRaises(ValueError, attr_padding_with_data_format)

        error_input = fluid.layers.data(
            name='error_data', shape=[1], dtype="float32"
        )

        def error_input_size():
            out = paddle.static.nn.conv2d_transpose(
                input=error_input, groups=1, num_filters=6, filter_size=3
            )

        self.assertRaises(ValueError, error_input_size)

        def error_groups():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=0,
                num_filters=6,
                filter_size=3,
                data_format='NHWC',
            )

        self.assertRaises(ValueError, error_groups)


class TestConv2DTransposeRepr(unittest.TestCase):
    def test_case(self):
        paddle.disable_static()
        x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0)
        conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
        print(conv)
        y_var = conv(x_var)
        y_np = y_var.numpy()
        self.assertIsNotNone(y_np)
        paddle.enable_static()


class TestTensorOutputSize1(UnittestBase):
    def init_info(self):
        self.shapes = [[2, 3, 8, 8]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size1'

    def var_prefix(self):
        return "Vars["

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
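        # output_size is fed as a Tensor so it is captured as a Var in the
        # program, which test_static asserts via var_prefix()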
        output_size = paddle.assign([17])
        out = paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size
        )
        return out

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(8, 8)
            x = paddle.randn([2, 3, 8, 8])
            x.stop_gradient = False
            feat = fc(x)
            out = self.call_func(feat)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[feat, out])
            np.testing.assert_allclose(res[1].shape, (2, 6, 17, 17))

            paddle.static.save_inference_model(
                self.save_path, [x], [feat, out], exe
            )
            # Test for Inference Predictor
            infer_outs = self.infer_prog()
            np.testing.assert_allclose(infer_outs[1].shape, (2, 6, 17, 17))


class TestTensorOutputSize2(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size2'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = [17, paddle.assign([17])]
        out = paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size
        )
        return out


class TestTensorOutputSize3(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size3'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = paddle.assign([17])
        out = paddle.static.nn.conv2d_transpose(
            x, num_filters=6, output_size=output_size, filter_size=3, stride=2
        )
        return out


class TestTensorOutputSize4(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size4'

    def call_func(self, x):
        output_size = [17, paddle.assign([17])]
        out = paddle.static.nn.conv2d_transpose(
            x, num_filters=6, output_size=output_size, filter_size=3, stride=2
        )
        return out


class TestTensorOutputSize5(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size5'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = [17, paddle.assign([17])]
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2,
        )
        out = conv2d_trans(x)
        return out


class TestTensorOutputSize6(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size6'

    def var_prefix(self):
        return "Var["

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = paddle.assign([17, 17])
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2,
        )
        out = conv2d_trans(x)
        return out


class TestTensorOutputSize7(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size7'

    def var_prefix(self):
        return ""

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = 17
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2,
        )
        out = conv2d_trans(x)
        return out


class TestTensorOutputSize8(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size8'

    def var_prefix(self):
        return ""

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = [17, 17]
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2,
        )
        out = conv2d_trans(x)
        return out


if __name__ == '__main__':
    unittest.main()