#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle
import paddle.nn as nn

paddle.enable_static()
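# OpTest requires the static graph mode enabled above; only
# TestConv2DTransposeRepr at the bottom temporarily switches to dynamic mode.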
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import OpTest


def conv2dtranspose_forward_naive(input_, filter_, attrs):
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                         "It can only be 'SAME', 'VALID' or 'EXPLICIT'." %
                         str(padding_algorithm))

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 3, 1, 2])
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups
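    # The filter layout is (in_c, f_out_c, f_h, f_w): each of the `groups`
    # slices maps sub_in_c input channels to f_out_c output channels.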

    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
        'dilations']

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
        padding = []
        for input_size, filter_size, stride_size in zip(input_shape,
                                                        kernel_size,
                                                        kernel_stride):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0))
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding
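
    # Worked example of the SAME rule above: input_size = 5, filter_size = 3
    # and stride = 2 give out_size = ceil(5 / 2) = 3 and
    # pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2, split as pad_0 = pad_1 = 1.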

    ksize = filter_.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1]
        input_data_shape = input_.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]

    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
    if 'output_size' in attrs:
        output_size = attrs['output_size']
        out_h = output_size[0] + pad_h_0 + pad_h_1
        out_w = output_size[1] + pad_w_0 + pad_w_1
    out_pad_h = 0
    out_pad_w = 0
    if 'output_padding' in attrs:
        out_pad_h = attrs['output_padding'][0]
        out_pad_w = attrs['output_padding'][1]
    out = np.zeros((in_n, out_c, out_h + out_pad_h, out_w + out_pad_w),
                   dtype=input_.dtype)
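    # e.g. for in_h = 7, stride = 2, f_h = 5 (TestWithEvenUpsampleOutputPadding
    # below): out_h = (7 - 1) * 2 + 5 = 17, and with pad = [2, 2] plus
    # output_padding = 1 the final slice keeps 17 - 2 - 2 + 1 = 14 rows.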

    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i,
                                          j]  # (c)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked *
                            filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                            axis=0)
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[n, g * f_out_c + k, i1:i2:dilations[0],
                            j1:j2:dilations[1]] += tmp_out

    out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h,
              pad_w_0:out_w - pad_w_1 + out_pad_w]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 1])
    return out
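

# A minimal sketch of driving the naive reference above directly; it is not
# used by the tests, and the shapes and attrs are illustrative only.
def _demo_naive_forward():
    input_ = np.random.random((2, 3, 5, 5))  # NCHW
    filter_ = np.random.random((3, 6, 3, 3))  # (in_c, f_out_c, f_h, f_w)
    attrs = {
        'strides': [2, 2],
        'paddings': [0, 0],
        'dilations': [1, 1],
        'groups': 1,
        'padding_algorithm': 'EXPLICIT',
        'data_format': 'NCHW'
    }
    # out_h = (5 - 1) * 2 + (3 - 1) + 1 = 11, so the result is (2, 6, 11, 11)
    return conv2dtranspose_forward_naive(input_, filter_, attrs).shape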


class TestConv2DTransposeOp(OpTest):

    def setUp(self):
        # init as conv transpose
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.need_check_grad = True
        self.is_test = False
        self.use_cudnn = False
        self.use_mkldnn = False
        self.output_size = None
        self.output_padding = []
        self.data_format = "NCHW"
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype(self.dtype)
        filter_ = np.random.random(self.filter_size).astype(self.dtype)

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(input_, filter_,
                                               self.attrs).astype(self.dtype)

        self.outputs = {'Output': output}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(not self.use_mkldnn))
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))

    def test_check_grad_no_input(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(place, ['Filter'],
                                           'Output',
                                           max_relative_error=0.02,
                                           no_grad_set=set(['Input']))
            else:
                self.check_grad(['Filter'],
                                'Output',
                                no_grad_set=set(['Input']))

    def test_check_grad_no_filter(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(place, ['Input'],
                                           'Output',
                                           no_grad_set=set(['Filter']))
            else:
                self.check_grad(['Input'],
                                'Output',
                                no_grad_set=set(['Filter']))

    def test_check_grad(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(place,
                                           set(['Input', 'Filter']),
                                           'Output',
                                           max_relative_error=0.02)
            else:
                self.check_grad(set(['Input', 'Filter']),
                                'Output',
                                max_relative_error=0.02)

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.op_type = "conv2d_transpose"


class TestWithSymmetricPad(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithAsymmetricPad(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithSAMEPad(TestConv2DTransposeOp):

    def init_test_case(self):
        self.stride = [2, 1]
        self.dilations = [1, 2]
        self.groups = 1
        self.input_size = [2, 3, 6, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 4, 3]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv2DTransposeOp):

    def init_test_case(self):
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'


class TestWithGroups(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]
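        # With groups = 2, the naive reference splits the 4 input channels into
        # sub_in_c = 2 per group and produces out_c = f_out_c * groups = 6.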


class TestWithStride(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithDilation(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithEvenUpsample(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class Test_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithGroups_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


class TestWithStride_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithDilation_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN(TestConv2DTransposeOp):

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):

    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSAMEPad(TestWithSAMEPad):

    def init_test_case(self):
        self.stride = [1, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'SAME'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithVALIDPad(TestWithVALIDPad):

    def init_test_case(self):
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride(TestWithStride):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups(TestWithGroups):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1]
#         self.stride = [2, 2]
#         self.dilations = [2, 2]
#         self.input_size = [2, 3, 5, 5]  # NCHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv2DTransposeOp):

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):

    def init_test_case(self):
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC(TestWithStride):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC(TestWithGroups):

    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):

    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_FP16(TestConv2DTransposeOp):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.need_check_grad = False
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"

    def test_check_output(self):
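        # FP16 outputs need a looser tolerance than the atol=1e-5 used by the
        # base class, hence atol=0.02 below.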
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=0.02, check_dygraph=(not self.use_mkldnn))
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):

    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestConv2DTransposeAPI(unittest.TestCase):

    def test_case1(self):
        data1 = fluid.layers.data(name='data1',
                                  shape=[3, 5, 5],
                                  dtype='float32')
        data2 = fluid.layers.data(name='data2',
                                  shape=[5, 5, 3],
                                  dtype='float32')
        out1 = fluid.layers.conv2d_transpose(input=data1,
                                             groups=1,
                                             num_filters=6,
                                             filter_size=3,
                                             data_format='NCHW')
        out2 = fluid.layers.conv2d_transpose(input=data2,
                                             groups=1,
                                             num_filters=6,
                                             filter_size=3,
                                             data_format='NHWC')
        out3 = fluid.layers.conv2d_transpose(input=data1,
                                             groups=1,
                                             num_filters=6,
                                             filter_size=3,
                                             padding=[[0, 0], [1, 1], [1, 1],
                                                      [0, 0]],
                                             data_format='NHWC')
        out4 = fluid.layers.conv2d_transpose(input=data1,
                                             groups=3,
                                             num_filters=6,
                                             filter_size=3,
                                             padding=[[0, 0], [0, 0], [2, 1],
                                                      [0, 0]],
                                             data_format='NCHW')
        out5 = fluid.layers.conv2d_transpose(input=data2,
                                             groups=1,
                                             num_filters=6,
                                             filter_size=3,
                                             padding='SAME',
                                             data_format='NCHW')
        out6 = fluid.layers.conv2d_transpose(input=data1,
                                             groups=1,
                                             num_filters=6,
                                             filter_size=3,
                                             padding='VALID',
                                             data_format='NHWC')
        out7 = fluid.layers.conv2d_transpose(input=data1,
                                             groups=1,
                                             num_filters=6,
                                             output_size=[7, 7],
                                             padding=[0, 0],
                                             data_format='NHWC')
872 873 874 875 876 877 878 879 880 881

        data1_np = np.random.random((2, 3, 5, 5)).astype("float32")
        data2_np = np.random.random((2, 5, 5, 3)).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(fluid.default_main_program(),
                          feed={
                              "data1": data1_np,
                              "data2": data2_np
                          },
                          fetch_list=[out1, out2, out3, out4, out5, out6, out7],
                          return_numpy=True)
        self.assertIsNotNone(results[0])
        self.assertIsNotNone(results[1])
        self.assertIsNotNone(results[2])
        self.assertIsNotNone(results[3])
        self.assertIsNotNone(results[4])
        self.assertIsNotNone(results[5])
        self.assertIsNotNone(results[6])


class TestConv2DTransposeOpException(unittest.TestCase):

    def test_exception(self):
        data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")

        def attr_data_format():
            out = fluid.layers.conv2d_transpose(input=data,
                                                groups=1,
                                                num_filters=6,
                                                filter_size=3,
                                                data_format="NCDHW")

        self.assertRaises(ValueError, attr_data_format)

        def attr_padding_str():
            out = fluid.layers.conv2d_transpose(input=data,
                                                groups=1,
                                                num_filters=6,
                                                filter_size=3,
                                                padding='Vald')

        self.assertRaises(ValueError, attr_padding_str)

        def attr_padding_list():
            out = fluid.layers.conv2d_transpose(input=data,
                                                groups=1,
                                                num_filters=6,
                                                filter_size=3,
                                                padding=[[1, 1], [1, 1], [0, 0],
                                                         [0, 0]])

        self.assertRaises(ValueError, attr_padding_list)

        def attr_padding_with_data_format():
            out = fluid.layers.conv2d_transpose(input=data,
                                                groups=1,
                                                num_filters=6,
                                                filter_size=3,
                                                padding=[[1, 1], [0, 0], [0, 0],
                                                         [1, 1]],
                                                data_format='NHWC')

        self.assertRaises(ValueError, attr_padding_with_data_format)

        error_input = fluid.layers.data(name='error_data',
                                        shape=[1],
                                        dtype="float32")

        def error_input_size():
            out = fluid.layers.conv2d_transpose(input=error_input,
                                                groups=1,
                                                num_filters=6,
                                                filter_size=3)

        self.assertRaises(ValueError, error_input_size)

        def error_groups():
            out = fluid.layers.conv2d_transpose(input=data,
                                                groups=0,
                                                num_filters=6,
                                                filter_size=3,
                                                data_format='NHWC')

        self.assertRaises(ValueError, error_groups)


class TestConv2DTransposeRepr(unittest.TestCase):

    def test_case(self):
        paddle.disable_static()
        x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
        conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
        print(conv)
        y_var = conv(x_var)
        y_np = y_var.numpy()
        self.assertIsNotNone(y_np)
        paddle.enable_static()


if __name__ == '__main__':
    unittest.main()