#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle
import paddle.nn as nn
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest

paddle.enable_static()


def conv2dtranspose_forward_naive(input_, filter_, attrs):
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                         "It can only be 'SAME', 'VALID' or 'EXPLICIT'." %
                         str(padding_algorithm))

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 3, 1, 2])
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups
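    # Filter layout for conv2d_transpose is (in_c, out_c // groups, f_h, f_w):
    # each group maps its sub_in_c input channels to f_out_c output channels.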

    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
        'dilations']

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
        padding = []
        for input_size, filter_size, stride_size in zip(
                input_shape, kernel_size, kernel_stride):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max((
                (out_size - 1) * stride_size + filter_size - input_size, 0))
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding
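
    # Worked example (illustrative): input_size=5, filter_size=3, stride=2
    # gives out_size = ceil(5 / 2) = 3 and
    # pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2, hence pad_0 = pad_1 = 1,
    # matching TensorFlow-style SAME padding.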

    ksize = filter_.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1]
        input_data_shape = input_.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]

    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
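    # Each input pixel is scattered into a dilated f_h x f_w window
    # (d_block_h x d_block_w), so the intermediate, un-cropped extent per
    # spatial dim is (in - 1) * stride + dilation * (f - 1) + 1; the explicit
    # paddings are cropped away again just before returning.
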
    if 'output_size' in attrs:
        output_size = attrs['output_size']
        out_h = output_size[0] + pad_h_0 + pad_h_1
        out_w = output_size[1] + pad_w_0 + pad_w_1
    out_pad_h = 0
    out_pad_w = 0
    if 'output_padding' in attrs:
        out_pad_h = attrs['output_padding'][0]
        out_pad_w = attrs['output_padding'][1]
    out = np.zeros(
        (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype)

    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i,
                                          j]  # (c)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked *
                            filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                            axis=0)
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[n, g * f_out_c + k, i1:i2:dilations[0],
                            j1:j2:dilations[1]] += tmp_out

    out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h,
              pad_w_0:out_w - pad_w_1 + out_pad_w]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 1])
    return out
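

# Minimal sketch (illustrative, not part of the test suite) of calling the
# naive reference directly; the shapes and attribute values below are
# assumptions mirroring what TestConv2DTransposeOp.setUp builds.
def _naive_reference_example():
    input_ = np.random.random((1, 3, 4, 4)).astype(np.float64)   # NCHW
    filter_ = np.random.random((3, 6, 3, 3)).astype(np.float64)  # (in_c, f_out_c, f_h, f_w)
    attrs = {
        'strides': [1, 1],
        'paddings': [0, 0],
        'dilations': [1, 1],
        'groups': 1,
        'padding_algorithm': 'EXPLICIT',
        'data_format': 'NCHW',
    }
    out = conv2dtranspose_forward_naive(input_, filter_, attrs)
    assert out.shape == (1, 6, 6, 6)  # (4 - 1) * 1 + 3 = 6 per spatial dim
    return out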


class TestConv2DTransposeOp(OpTest):
    def setUp(self):
        # init as conv transpose
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.need_check_grad = True
        self.is_test = False
        self.use_cudnn = False
        self.use_mkldnn = False
        self.output_size = None
        self.output_padding = []
        self.data_format = "NCHW"
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype(self.dtype)
        filter_ = np.random.random(self.filter_size).astype(self.dtype)

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(input_, filter_,
                                               self.attrs).astype(self.dtype)

        self.outputs = {'Output': output}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(not self.use_mkldnn))
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))

    def test_check_grad_no_input(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Filter'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Input']))
            else:
                self.check_grad(
                    ['Filter'], 'Output', no_grad_set=set(['Input']))

    def test_check_grad_no_filter(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter']))
            else:
                self.check_grad(
                    ['Input'], 'Output', no_grad_set=set(['Filter']))

    def test_check_grad(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    set(['Input', 'Filter']),
                    'Output',
                    max_relative_error=0.02)
            else:
                self.check_grad(
                    set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.op_type = "conv2d_transpose"


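# Each subclass below re-runs the same forward and gradient checks while
# varying a single attribute (padding, stride, dilation, groups, output size,
# data layout, cuDNN); only init_test_case and init_op_type are overridden.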
class TestWithSymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithAsymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithSAMEPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [2, 1]
        self.dilations = [1, 2]
        self.groups = 1
        self.input_size = [2, 3, 6, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 4, 3]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'


class TestWithGroups(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]


class TestWithStride(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithDilation(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithEvenUpsample(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class Test_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithGroups_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


class TestWithStride_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithDilation_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN(TestConv2DTransposeOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'SAME'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithVALIDPad(TestWithVALIDPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated convolution.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1]
#         self.stride = [2, 2]
#         self.dilations = [2, 2]
#         self.input_size = [2, 3, 5, 5]  # NCHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_FP16(TestConv2DTransposeOp):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.need_check_grad = False
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"

    def test_check_output(self):
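        # FP16 output is compared against the reference with a looser
        # absolute tolerance than the float64 tests above.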
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=0.02, check_dygraph=(not self.use_mkldnn))
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestConv2DTransposeAPI(unittest.TestCase):
    def test_case1(self):
        data1 = fluid.layers.data(
            name='data1', shape=[3, 5, 5], dtype='float32')
        data2 = fluid.layers.data(
            name='data2', shape=[5, 5, 3], dtype='float32')
        out1 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NCHW')
        out2 = fluid.layers.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NHWC')
        out3 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            data_format='NHWC')
        out4 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=3,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [0, 0], [2, 1], [0, 0]],
            data_format='NCHW')
        out5 = fluid.layers.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='SAME',
            data_format='NCHW')
        out6 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='VALID',
            data_format='NHWC')
        out7 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            output_size=[7, 7],
            padding=[0, 0],
            data_format='NHWC')

        data1_np = np.random.random((2, 3, 5, 5)).astype("float32")
        data2_np = np.random.random((2, 5, 5, 3)).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(
            fluid.default_main_program(),
            feed={"data1": data1_np,
                  "data2": data2_np},
            fetch_list=[out1, out2, out3, out4, out5, out6, out7],
            return_numpy=True)
        self.assertIsNotNone(results[0])
        self.assertIsNotNone(results[1])
        self.assertIsNotNone(results[2])
        self.assertIsNotNone(results[3])
        self.assertIsNotNone(results[4])
        self.assertIsNotNone(results[5])
        self.assertIsNotNone(results[6])


class TestConv2DTransposeOpException(unittest.TestCase):
    def test_exception(self):
        data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")

        def attr_data_format():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                data_format="NCDHW")

        self.assertRaises(ValueError, attr_data_format)

        def attr_padding_str():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding='Vald')

        self.assertRaises(ValueError, attr_padding_str)

        def attr_padding_list():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [1, 1], [0, 0], [0, 0]])

        self.assertRaises(ValueError, attr_padding_list)

        def attr_padding_with_data_format():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                data_format='NHWC')

        self.assertRaises(ValueError, attr_padding_with_data_format)


class TestConv2DTransposeRepr(unittest.TestCase):
    def test_case(self):
        paddle.disable_static()
        x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
        conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
        print(conv)
        y_var = conv(x_var)
        y_np = y_var.numpy()
        self.assertIsNotNone(y_np)
        paddle.enable_static()


if __name__ == '__main__':
    unittest.main()