#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np

import paddle
import paddle.nn as nn

paddle.enable_static()
from op_test import OpTest
from test_attribute_var import UnittestBase

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


def conv2dtranspose_forward_naive(input_, filter_, attrs):
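    """NumPy reference implementation of conv2d_transpose.

    Each input pixel is scattered into the output: its value multiplies the
    whole (dilated) kernel window, and overlapping windows are summed. The
    full output is then cropped by the paddings. Supports groups, dilations,
    'SAME'/'VALID'/'EXPLICIT' padding, output_size, output_padding, and the
    NCHW/NHWC layouts.
    """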
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError(
            "Unknown Attr(padding_algorithm): '%s'. "
            "It can only be 'SAME', 'VALID' or 'EXPLICIT'."
            % str(padding_algorithm)
        )

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 3, 1, 2])
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups

    stride, pad, dilations = (
        attrs['strides'],
        attrs['paddings'],
        attrs['dilations'],
    )

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
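        # 'SAME' keeps the forward conv output at ceil(input / stride); the
        # total pad per dimension is split as evenly as possible between the
        # two sides, with the extra unit (if any) going to the second side.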
        padding = []
        for input_size, filter_size, stride_size in zip(
            input_shape, kernel_size, kernel_stride
        ):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0)
            )
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    ksize = filter_.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1]
        input_data_shape = input_.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]

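    # Full (uncropped) output size of a transposed conv:
    #     out = (in - 1) * stride + dilation * (k - 1) + 1
    # the paddings are applied afterwards by cropping the edges.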
    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
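    # An explicit output_size overrides the derived pre-crop size, while
    # output_padding appends extra rows/cols at the bottom/right edge.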
    if 'output_size' in attrs:
        output_size = attrs['output_size']
        out_h = output_size[0] + pad_h_0 + pad_h_1
        out_w = output_size[1] + pad_w_0 + pad_w_1
    out_pad_h = 0
    out_pad_w = 0
    if 'output_padding' in attrs:
        out_pad_h = attrs['output_padding'][0]
        out_pad_w = attrs['output_padding'][1]
    out = np.zeros(
        (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype
    )

    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[
                        n, g * sub_in_c : (g + 1) * sub_in_c, i, j
                    ]  # shape: (sub_in_c,)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked
                            * filter_[
                                g * sub_in_c : (g + 1) * sub_in_c, k, :, :
                            ],
                            axis=0,
                        )
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[
                            n,
                            g * f_out_c + k,
                            i1 : i2 : dilations[0],
                            j1 : j2 : dilations[1],
                        ] += tmp_out

    out = out[
        :,
        :,
        pad_h_0 : out_h - pad_h_1 + out_pad_h,
        pad_w_0 : out_w - pad_w_1 + out_pad_w,
    ]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 1])
    return out
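

# A minimal sketch (not executed by the test suite) of how the naive
# reference above can be cross-checked against the dygraph API; the shapes
# and attrs below are illustrative assumptions, not values used by the tests:
#
#     paddle.disable_static()
#     x = np.random.random((2, 3, 5, 5)).astype('float64')
#     w = np.random.random((3, 6, 3, 3)).astype('float64')
#     attrs = {'strides': [1, 1], 'paddings': [0, 0], 'dilations': [1, 1],
#              'groups': 1, 'padding_algorithm': 'EXPLICIT',
#              'data_format': 'NCHW'}
#     ref = conv2dtranspose_forward_naive(x, w, attrs)
#     out = paddle.nn.functional.conv2d_transpose(
#         paddle.to_tensor(x), paddle.to_tensor(w)
#     )
#     np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6)
#     paddle.enable_static()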


class TestConv2DTransposeOp(OpTest):
    def setUp(self):
        # init as conv transpose
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.need_check_grad = True
        self.is_test = False
        self.use_cudnn = False
        self.use_mkldnn = False
        self.output_size = None
        self.output_padding = []
        self.data_format = "NCHW"
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype(self.dtype)
        filter_ = np.random.random(self.filter_size).astype(self.dtype)

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format,
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(
            input_, filter_, self.attrs
        ).astype(self.dtype)

        self.outputs = {'Output': output}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
            )
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))

    def test_check_grad_no_input(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    ['Filter'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Input']),
                )
            else:
                self.check_grad(
                    ['Filter'], 'Output', no_grad_set=set(['Input'])
                )

    def test_check_grad_no_filter(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
                )
            else:
                self.check_grad(
                    ['Input'], 'Output', no_grad_set=set(['Filter'])
                )

    def test_check_grad(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    set(['Input', 'Filter']),
                    'Output',
                    max_relative_error=0.02,
                )
            else:
                self.check_grad(
                    set(['Input', 'Filter']), 'Output', max_relative_error=0.02
                )

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
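        # conv2d_transpose filter layout: [in_c, out_c // groups, kH, kW]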
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.op_type = "conv2d_transpose"


class TestWithSymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithAsymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithSAMEPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [2, 1]
        self.dilations = [1, 2]
        self.groups = 1
        self.input_size = [2, 3, 6, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 4, 3]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'


class TestWithGroups(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]


class TestWithStride(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithDilation(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithEvenUpsample(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
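        # Without output_size the result would be (7 - 1) * 2 - 2 * 2 + 5 = 13;
        # output_size may exceed that by up to stride - 1, so 14 is valid here.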
        self.output_size = [14, 14]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class Test_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithGroups_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


class TestWithStride_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithDilation_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN(TestConv2DTransposeOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithVALIDPad(TestWithVALIDPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# ------------ test_cudnn ------------
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN V5.0, which does not support dilated convolution.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1]
#         self.stride = [2, 2]
#         self.dilations = [2, 2]
#         self.input_size = [2, 3, 5, 5]  # NCHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride_NHWC(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups_NHWC(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_FP16(TestConv2DTransposeOp):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.need_check_grad = False
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=0.02, check_dygraph=(not self.use_mkldnn)
            )
        else:
            self.check_output(check_dygraph=(not self.use_mkldnn))


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


767 768 769
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestConv2DTransposeAPI(unittest.TestCase):
    def test_case1(self):
        data1 = paddle.static.data(
            name='data1', shape=[-1, 3, 5, 5], dtype='float32'
        )
        data2 = paddle.static.data(
            name='data2', shape=[-1, 5, 5, 3], dtype='float32'
        )
        out1 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NCHW',
        )
        out2 = paddle.static.nn.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NHWC',
        )
        out3 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            data_format='NHWC',
        )
        out4 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=3,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [0, 0], [2, 1], [0, 0]],
            data_format='NCHW',
        )
        out5 = paddle.static.nn.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='SAME',
            data_format='NCHW',
        )
        out6 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='VALID',
            data_format='NHWC',
        )
        out7 = paddle.static.nn.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            output_size=[7, 7],
            padding=[0, 0],
            data_format='NHWC',
        )

        data1_np = np.random.random((2, 3, 5, 5)).astype("float32")
        data2_np = np.random.random((2, 5, 5, 3)).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(
            fluid.default_main_program(),
            feed={"data1": data1_np, "data2": data2_np},
            fetch_list=[out1, out2, out3, out4, out5, out6, out7],
            return_numpy=True,
        )
        self.assertIsNotNone(results[0])
        self.assertIsNotNone(results[1])
        self.assertIsNotNone(results[2])
        self.assertIsNotNone(results[3])
        self.assertIsNotNone(results[4])
        self.assertIsNotNone(results[5])
        self.assertIsNotNone(results[6])


class TestConv2DTransposeOpException(unittest.TestCase):
    def test_exception(self):
        data = paddle.static.data(
            name='data', shape=[-1, 3, 5, 5], dtype="float32"
        )

        def attr_data_format():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                data_format="NCDHW",
            )

        self.assertRaises(ValueError, attr_data_format)

        def attr_padding_str():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding='Vald',
            )

        self.assertRaises(ValueError, attr_padding_str)

        def attr_padding_list():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [1, 1], [0, 0], [0, 0]],
            )

        self.assertRaises(ValueError, attr_padding_list)

        def attr_padding_with_data_format():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                data_format='NHWC',
            )

        self.assertRaises(ValueError, attr_padding_with_data_format)

        error_input = paddle.static.data(
            name='error_data', shape=[-1, 1], dtype="float32"
        )

        def error_input_size():
            out = paddle.static.nn.conv2d_transpose(
                input=error_input, groups=1, num_filters=6, filter_size=3
            )

        self.assertRaises(ValueError, error_input_size)

        def error_groups():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=0,
                num_filters=6,
                filter_size=3,
                data_format='NHWC',
            )

        self.assertRaises(ValueError, error_groups)

        def error_0_filter_number():
            out = paddle.static.nn.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=0,
                filter_size=3,
                data_format='NCHW',
            )

        self.assertRaises(ValueError, error_0_filter_number)


class TestConv2DTransposeRepr(unittest.TestCase):
    def test_case(self):
        paddle.disable_static()
        x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0)
        conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
        print(conv)
        y_var = conv(x_var)
        y_np = y_var.numpy()
        self.assertIsNotNone(y_np)
        paddle.enable_static()


class TestConv2dTranspose(unittest.TestCase):
    def error_weight_input(self):
        array = np.array([1], dtype=np.float32)
        x = paddle.to_tensor(np.reshape(array, [1, 1, 1, 1]), dtype='float32')
        weight = paddle.to_tensor(np.reshape(array, [1]), dtype='float32')
        paddle.nn.functional.conv2d_transpose(x, weight, bias=0)

    def test_type_error(self):
        self.assertRaises(ValueError, self.error_weight_input)


class TestTensorOutputSize1(UnittestBase):
    def init_info(self):
        self.shapes = [[2, 3, 8, 8]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size1'

    def var_prefix(self):
        return "Vars["

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = paddle.assign([17])
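        # Passing output_size as a Tensor (via paddle.assign) makes it a Var
        # input of the op; test_static() checks for that in the program repr.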
        out = paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size
        )
        return out

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(8, 8)
            x = paddle.randn([2, 3, 8, 8])
            x.stop_gradient = False
            feat = fc(x)
            out = self.call_func(feat)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[feat, out])
            np.testing.assert_allclose(res[1].shape, (2, 6, 17, 17))

            paddle.static.save_inference_model(
                self.save_path, [x], [feat, out], exe
            )
            # Test for Inference Predictor
            infer_outs = self.infer_prog()
            np.testing.assert_allclose(infer_outs[1].shape, (2, 6, 17, 17))


class TestTensorOutputSize2(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size2'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = [17, paddle.assign([17])]
        out = paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size
        )
        return out


class TestTensorOutputSize3(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size3'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        output_size = paddle.assign([17])
        out = paddle.static.nn.conv2d_transpose(
            x, num_filters=6, output_size=output_size, filter_size=3, stride=2
        )
        return out


class TestTensorOutputSize4(TestTensorOutputSize1):
    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size4'

    def call_func(self, x):
        output_size = [17, paddle.assign([17])]
        out = paddle.static.nn.conv2d_transpose(
            x, num_filters=6, output_size=output_size, filter_size=3, stride=2
        )
        return out


if __name__ == '__main__':
    unittest.main()