#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle
paddle.enable_static()
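# OpTest cases below build static-graph programs, so switch out of the
# default dygraph mode before constructing any layers.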
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest


def conv2dtranspose_forward_naive(input_, filter_, attrs):
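    """Naive reference implementation of conv2d_transpose.

    Scatters each input pixel through the (possibly dilated) kernel into the
    output, then crops the padding. Handles NCHW/NHWC layouts, groups,
    EXPLICIT/SAME/VALID padding, and the optional 'output_size' and
    'output_padding' attrs. Serves as ground truth for the op tests below.
    """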
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                         "It can only be 'SAME' or 'VALID'." %
                         str(padding_algorithm))

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 3, 1, 2])
    in_n, in_c, in_h, in_w = input_.shape
    f_c, f_out_c, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups

    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
        'dilations']

    # update pad and dilation
    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
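        # 'SAME' padding as computed for a forward conv: out_size =
        # ceil(input_size / stride), with the needed padding split as evenly
        # as possible. Worked example: input_size=5, filter=3, stride=2 ->
        # out_size=3, pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2, padding=[1, 1].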
        padding = []
        for input_size, filter_size, stride_size in zip(
                input_shape, kernel_size, kernel_stride):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max((
                (out_size - 1) * stride_size + filter_size - input_size, 0))
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    ksize = filter_.shape[2:4]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1]
        input_data_shape = input_.shape[2:4]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

    pad_h_0, pad_h_1 = pad[0], pad[0]
    pad_w_0, pad_w_1 = pad[1], pad[1]
    if len(pad) == 4:
        pad_h_0, pad_h_1 = pad[0], pad[1]
        pad_w_0, pad_w_1 = pad[2], pad[3]

    d_block_h = dilations[0] * (f_h - 1) + 1
    d_block_w = dilations[1] * (f_w - 1) + 1
    out_h = (in_h - 1) * stride[0] + d_block_h
    out_w = (in_w - 1) * stride[1] + d_block_w
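    # A dilated kernel spans d_block = dilation * (k - 1) + 1 pixels, so e.g.
    # in_h=5, stride=1, f_h=3, dilation=1 gives out_h = (5 - 1) * 1 + 3 = 7
    # before the padding crop below.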
    if 'output_size' in attrs:
        output_size = attrs['output_size']
        out_h = output_size[0] + pad_h_0 + pad_h_1
        out_w = output_size[1] + pad_w_0 + pad_w_1
    out_pad_h = 0
    out_pad_w = 0
    if 'output_padding' in attrs:
        out_pad_h = attrs['output_padding'][0]
        out_pad_w = attrs['output_padding'][1]
    out = np.zeros(
        (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype)
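    # Scatter-add formulation of transposed conv: each input pixel (i, j)
    # multiplies the kernel and accumulates into the output window starting
    # at (i * stride, j * stride), strided by the dilation.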

    for n in range(in_n):
        for i in range(in_h):
            for j in range(in_w):
                for g in range(groups):
                    input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i,
                                          j]  # (c)
                    input_masked = np.reshape(input_masked, (sub_in_c, 1, 1))
                    input_masked = np.tile(input_masked, (1, f_h, f_w))

                    for k in range(f_out_c):
                        tmp_out = np.sum(
                            input_masked *
                            filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                            axis=0)
                        i1, i2 = i * stride[0], i * stride[0] + d_block_h
                        j1, j2 = j * stride[1], j * stride[1] + d_block_w
                        out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
                            dilations[1]] += tmp_out

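    # Crop away the implicit padding; output_padding keeps extra rows/columns
    # at the bottom/right edge.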
    out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h, pad_w_0:out_w - pad_w_1
              + out_pad_w]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 1])
    return out
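

# Minimal usage sketch for the reference above (illustrative only; the attrs
# mirror those assembled in TestConv2DTransposeOp.setUp):
#   x = np.random.random((1, 3, 5, 5))
#   w = np.random.random((3, 6, 3, 3))
#   attrs = {'strides': [1, 1], 'paddings': [0, 0], 'dilations': [1, 1],
#            'groups': 1, 'padding_algorithm': 'EXPLICIT',
#            'data_format': 'NCHW'}
#   conv2dtranspose_forward_naive(x, w, attrs).shape == (1, 6, 7, 7)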


class TestConv2DTransposeOp(OpTest):
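    """Base test: runs the conv2d_transpose op on random Input/Filter tensors
    and compares against conv2dtranspose_forward_naive; subclasses override
    init_test_case() / init_op_type() to cover other configurations."""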
    def setUp(self):
        # init as conv transpose
        self.dtype = np.float64
        self.need_check_grad = True
        self.is_test = False
        self.use_cudnn = False
        self.use_mkldnn = False
        self.output_size = None
        self.output_padding = []
        self.data_format = "NCHW"
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype(self.dtype)
        filter_ = np.random.random(self.filter_size).astype(self.dtype)

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(input_, filter_,
                                               self.attrs).astype(self.dtype)

        self.outputs = {'Output': output}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_no_input(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Filter'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Input']))
            else:
                self.check_grad(
                    ['Filter'], 'Output', no_grad_set=set(['Input']))

    def test_check_grad_no_filter(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place, ['Input'], 'Output', no_grad_set=set(['Filter']))
            else:
                self.check_grad(
                    ['Input'], 'Output', no_grad_set=set(['Filter']))

    def test_check_grad(self):
        if self.need_check_grad:
            if self.use_cudnn:
                place = core.CUDAPlace(0)
                self.check_grad_with_place(
                    place,
                    set(['Input', 'Filter']),
                    'Output',
                    max_relative_error=0.02)
            else:
                self.check_grad(
                    set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.op_type = "conv2d_transpose"


class TestWithSymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithAsymmetricPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithSAMEPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [2, 1]
        self.dilations = [1, 2]
        self.groups = 1
        self.input_size = [2, 3, 6, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 4, 3]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv2DTransposeOp):
    def init_test_case(self):
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]
        self.padding_algorithm = 'VALID'


class TestWithGroups(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]


class TestWithStride(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithDilation(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]


class TestWithEvenUpsample(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 3, 7, 7]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 5, 5]


class Test_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithGroups_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


class TestWithStride_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithDilation_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [2, 2]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_padding = [1, 1]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN(TestConv2DTransposeOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithVALIDPad(TestWithVALIDPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 2]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 4, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1]
#         self.stride = [2, 2]
#         self.dilations = [2, 2]
#         self.input_size = [2, 3, 5, 5]  # NCHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv2DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
    def init_test_case(self):
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_FP16(TestConv2DTransposeOp):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3]

    def init_op_type(self):
        self.need_check_grad = False
        self.use_cudnn = True
        self.op_type = "conv2d_transpose"

    def test_check_output(self):
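        # FP16 outputs are checked with a looser tolerance; gradient checks
        # are skipped via need_check_grad = False in init_op_type.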
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=0.02, check_dygraph=(self.use_mkldnn == False))
        else:
            self.check_output(check_dygraph=(self.use_mkldnn == False))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNN_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 1
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 0, 2, 3]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.input_size = [2, 5, 5, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.dilations = [1, 1]
        self.groups = 2
        self.input_size = [2, 5, 5, 4]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3]
        self.data_format = 'NHWC'


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
    def init_test_case(self):
        self.dtype = np.float16
        self.pad = [2, 2]
        self.stride = [2, 2]
        self.groups = 1
        self.dilations = [1, 1]
        self.output_size = [14, 14]
        self.input_size = [2, 7, 7, 3]  # NHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 5, 5]
        self.data_format = 'NHWC'


class TestConv2DTransposeAPI(unittest.TestCase):
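    """Static-graph API checks for fluid.layers.conv2d_transpose covering
    NCHW/NHWC layouts, explicit/SAME/VALID padding, groups and output_size."""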
    def test_case1(self):
        data1 = fluid.layers.data(
            name='data1', shape=[3, 5, 5], dtype='float32')
        data2 = fluid.layers.data(
            name='data2', shape=[5, 5, 3], dtype='float32')
        out1 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NCHW')
        out2 = fluid.layers.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            data_format='NHWC')
        out3 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            data_format='NHWC')
        out4 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=3,
            num_filters=6,
            filter_size=3,
            padding=[[0, 0], [0, 0], [2, 1], [0, 0]],
            data_format='NCHW')
        out5 = fluid.layers.conv2d_transpose(
            input=data2,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='SAME',
            data_format='NCHW')
        out6 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            filter_size=3,
            padding='VALID',
            data_format='NHWC')
        out7 = fluid.layers.conv2d_transpose(
            input=data1,
            groups=1,
            num_filters=6,
            output_size=[7, 7],
            padding=[0, 0],
            data_format='NHWC')

        data1_np = np.random.random((2, 3, 5, 5)).astype("float32")
        data2_np = np.random.random((2, 5, 5, 3)).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(
            fluid.default_main_program(),
            feed={"data1": data1_np,
                  "data2": data2_np},
            fetch_list=[out1, out2, out3, out4, out5, out6, out7],
            return_numpy=True)
        self.assertIsNotNone(results[0])
        self.assertIsNotNone(results[1])
        self.assertIsNotNone(results[2])
        self.assertIsNotNone(results[3])
        self.assertIsNotNone(results[4])
        self.assertIsNotNone(results[5])
        self.assertIsNotNone(results[6])


class TestConv2DTransposeOpException(unittest.TestCase):
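    """Checks that invalid data_format and padding arguments raise
    ValueError."""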
    def test_exception(self):
        data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")

        def attr_data_format():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                data_format="NCDHW")

        self.assertRaises(ValueError, attr_data_format)

        def attr_padding_str():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding='Vald')

        self.assertRaises(ValueError, attr_padding_str)

        def attr_padding_list():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [1, 1], [0, 0], [0, 0]])

        self.assertRaises(ValueError, attr_padding_list)

        def attr_padding_with_data_format():
            out = fluid.layers.conv2d_transpose(
                input=data,
                groups=1,
                num_filters=6,
                filter_size=3,
                padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                data_format='NHWC')

        self.assertRaises(ValueError, attr_padding_with_data_format)


if __name__ == '__main__':
    unittest.main()