#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle

paddle.enable_static()
from op_test import OpTest

import paddle.fluid.core as core


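# Naive NumPy reference for conv3d_transpose: transposed convolution is
# computed as a scatter, where each input voxel broadcasts its value through
# the (dilated) kernel into an enlarged output, which is then cropped by the
# paddings.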
def conv3dtranspose_forward_naive(input_, filter_, attrs):
    padding_algorithm = attrs['padding_algorithm']
    if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
        raise ValueError(
            "Unknown Attr(padding_algorithm): '%s'. "
            "It can only be 'SAME', 'VALID' or 'EXPLICIT'."
            % str(padding_algorithm)
        )

    if attrs['data_format'] == 'NHWC':
        input_ = np.transpose(input_, [0, 4, 1, 2, 3])
    in_n, in_c, in_d, in_h, in_w = input_.shape
    f_c, f_out_c, f_d, f_h, f_w = filter_.shape
    groups = attrs['groups']
    assert in_c == f_c
    out_c = f_out_c * groups
    sub_in_c = in_c // groups

    stride, pad, dilations = (
        attrs['strides'],
        attrs['paddings'],
        attrs['dilations'],
    )

    def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride):
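        # SAME padding: total pad per spatial dim is
        # max((ceil(in / stride) - 1) * stride + filter - in, 0),
        # split evenly, with the odd unit going to the back/bottom/right.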
        padding = []
        for input_size, filter_size, stride_size in zip(
            input_shape, kernel_size, kernel_stride
        ):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max(
                ((out_size - 1) * stride_size + filter_size - input_size, 0)
            )
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

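    # Resolve the paddings from padding_algorithm: VALID means zero padding,
    # SAME derives it from the input shape (with dilation reset to 1).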
    ksize = filter_.shape[2:5]
    if padding_algorithm == "VALID":
        pad = [0, 0, 0, 0, 0, 0]
    elif padding_algorithm == "SAME":
        dilations = [1, 1, 1]
        input_data_shape = input_.shape[2:5]
        pad = _get_padding_with_SAME(input_data_shape, ksize, stride)

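    # Normalize pad to the explicit per-side form
    # [front, back, top, bottom, left, right].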
    pad_d_0, pad_d_1 = pad[0], pad[0]
    pad_h_0, pad_h_1 = pad[1], pad[1]
    pad_w_0, pad_w_1 = pad[2], pad[2]
    if len(pad) == 6:
        pad_d_0, pad_d_1 = pad[0], pad[1]
        pad_h_0, pad_h_1 = pad[2], pad[3]
        pad_w_0, pad_w_1 = pad[4], pad[5]

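    # Dilated kernel extent and uncropped output size:
    # out = (in - 1) * stride + dilation * (k - 1) + 1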
    d_block_d = dilations[0] * (f_d - 1) + 1
    d_block_h = dilations[1] * (f_h - 1) + 1
    d_block_w = dilations[2] * (f_w - 1) + 1
    out_d = (in_d - 1) * stride[0] + d_block_d
    out_h = (in_h - 1) * stride[1] + d_block_h
    out_w = (in_w - 1) * stride[2] + d_block_w
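    # Scatter pass: each input voxel, scaled by the kernel, accumulates into
    # a dilated window anchored at its strided output position.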
    out = np.zeros((in_n, out_c, out_d, out_h, out_w))

    for n in range(in_n):
        for d in range(in_d):
            for i in range(in_h):
                for j in range(in_w):
                    for g in range(groups):
                        input_masked = input_[
                            n, g * sub_in_c : (g + 1) * sub_in_c, d, i, j
                        ]  # (c)
                        input_masked = np.reshape(
                            input_masked, (sub_in_c, 1, 1, 1)
                        )
                        input_masked = np.tile(input_masked, (1, f_d, f_h, f_w))

                        for k in range(f_out_c):
                            tmp_out = np.sum(
                                input_masked
                                * filter_[
                                    g * sub_in_c : (g + 1) * sub_in_c,
                                    k,
                                    :,
                                    :,
                                    :,
                                ],
                                axis=0,
                            )
                            d1, d2 = d * stride[0], d * stride[0] + d_block_d
                            i1, i2 = i * stride[1], i * stride[1] + d_block_h
                            j1, j2 = j * stride[2], j * stride[2] + d_block_w
                            out[
                                n,
                                g * f_out_c + k,
                                d1 : d2 : dilations[0],
                                i1 : i2 : dilations[1],
                                j1 : j2 : dilations[2],
                            ] += tmp_out

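    # Crop the paddings from the enlarged output.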
    out = out[
        :,
        :,
        pad_d_0 : out_d - pad_d_1,
        pad_h_0 : out_h - pad_h_1,
        pad_w_0 : out_w - pad_w_1,
    ]
    if attrs['data_format'] == 'NHWC':
        out = np.transpose(out, [0, 2, 3, 4, 1])
    return out


class TestConv3DTransposeOp(OpTest):
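    """Checks the conv3d_transpose op against the naive NumPy reference above."""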
    def setUp(self):
        # init as conv transpose
        self.use_cudnn = False
        self.check_no_input = False
        self.check_no_filter = False
        self.data_format = 'NCHW'
        self.pad = [0, 0, 0]
        self.padding_algorithm = "EXPLICIT"
        self.init_op_type()
        self.init_test_case()

        input_ = np.random.random(self.input_size).astype("float32")
        filter_ = np.random.random(self.filter_size).astype("float32")

        self.inputs = {'Input': input_, 'Filter': filter_}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'dilations': self.dilations,
            'groups': self.groups,
            'use_cudnn': self.use_cudnn,
            'data_format': self.data_format,
        }

        output = conv3dtranspose_forward_naive(
            input_, filter_, self.attrs
        ).astype("float32")

        self.outputs = {'Output': output}

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'Filter']),
                'Output',
                max_relative_error=0.03,
            )
        else:
            self.check_grad(
                set(['Input', 'Filter']), 'Output', max_relative_error=0.03
            )

    def test_check_grad_no_filter(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']),
            )
        elif self.check_no_filter:
            self.check_grad(
                ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']),
            )

    def test_check_grad_no_input(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']),
            )
        elif self.check_no_input:
            self.check_grad(
                ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']),
            )

    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]

    def init_op_type(self):
        self.op_type = "conv3d_transpose"


class TestWithSymmetricPad(TestConv3DTransposeOp):
    def init_test_case(self):
        self.check_no_input = True
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]


class TestWithAsymmetricPad(TestConv3DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 0, 1, 0, 1, 2]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]


class TestWithSAMEPad(TestConv3DTransposeOp):
    def init_test_case(self):
        self.stride = [1, 1, 2]
        self.dilations = [1, 2, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 6]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 4]
        self.padding_algorithm = 'SAME'


class TestWithVALIDPad(TestConv3DTransposeOp):
    def init_test_case(self):
        self.stride = [2, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 4, 3]
        self.padding_algorithm = 'VALID'


class TestWithStride(TestConv3DTransposeOp):
    def init_test_case(self):
        self.check_no_filter = True
        self.pad = [1, 1, 1]
        self.stride = [2, 2, 2]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]


class TestWithGroups(TestConv3DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 2
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3, 3]


class TestWithDilation(TestConv3DTransposeOp):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [2, 2, 2]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]


class Test_NHWC(TestConv3DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.data_format = 'NHWC'


# ------------ test_cudnn ------------
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN(TestConv3DTransposeOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1, 1, 0, 0, 2]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 4, 4, 4]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
    def init_test_case(self):
        self.stride = [1, 1, 2]
        self.dilations = [1, 2, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 4, 3]
        self.padding_algorithm = 'SAME'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithVALIDPad(TestWithVALIDPad):
    def init_test_case(self):
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.padding_algorithm = 'VALID'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [2, 2, 2]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 6, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 2
        self.input_size = [1, 2, 5, 5, 5]  # NCDHW
        f_c = self.input_size[1]
        self.filter_size = [f_c, 3, 3, 3, 3]

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_test_case(self):
#         self.pad = [1, 1, 1]
#         self.stride = [2, 2, 2]
#         self.dilations = [2, 2, 2]
#         self.input_size = [2, 3, 5, 5, 5]  # NCDHW
#         f_c = self.input_size[1]
#         self.filter_size = [f_c, 6, 3, 3, 3]
#
#     def init_op_type(self):
#         self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNN_NHWC(TestConv3DTransposeOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
    def init_test_case(self):
        self.pad = [1, 0, 1, 0, 0, 2]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithStride_NHWC(TestWithStride):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [2, 2, 2]
        self.dilations = [1, 1, 1]
        self.groups = 1
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 6, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCUDNNWithGroups_NHWC(TestWithGroups):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.dilations = [1, 1, 1]
        self.groups = 2
        self.input_size = [1, 5, 5, 5, 2]  # NDHWC
        f_c = self.input_size[-1]
        self.filter_size = [f_c, 3, 3, 3, 3]
        self.data_format = 'NHWC'

    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d_transpose"


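# Dygraph error check: judging from the tests above, conv3d_transpose expects
# a 5-D weight of shape [in_channels, out_channels // groups, kD, kH, kW], so
# a 1-D weight should raise ValueError.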
class TestConv3dTranspose(unittest.TestCase):
    def error_weight_input(self):
        array = np.array([1], dtype=np.float32)
        x = paddle.to_tensor(
            np.reshape(array, [1, 1, 1, 1, 1]), dtype='float32'
        )
        weight = paddle.to_tensor(np.reshape(array, [1]), dtype='float32')
        paddle.nn.functional.conv3d_transpose(x, weight, bias=0)

    def test_type_error(self):
        self.assertRaises(ValueError, self.error_weight_input)


if __name__ == '__main__':
    unittest.main()