#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest


def conv2d_forward_naive(input, filter, group, conv_param):
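    """Reference conv2d implemented with plain NumPy loops.

    `input` is NCHW and `filter` is (out_c, in_c // group, f_h, f_w).
    The output spatial size follows the usual convolution arithmetic, e.g.
    out_h = 1 + (in_h + 2 * pad_h - (dilation_h * (f_h - 1) + 1)) // stride_h.
    """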
    in_n, in_c, in_h, in_w = input.shape
    out_c, f_c, f_h, f_w = filter.shape
    assert f_c * group == in_c
    assert np.mod(out_c, group) == 0
    sub_out_c = out_c // group

    stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
        'dilation']
    out_h = 1 + (in_h + 2 * pad[0] - (dilation[0] * (f_h - 1) + 1)) // stride[0]
    out_w = 1 + (in_w + 2 * pad[1] - (dilation[1] * (f_w - 1) + 1)) // stride[1]
    out = np.zeros((in_n, out_c, out_h, out_w))

    # Effective (dilated) filter extent along each spatial axis.
    d_block_h = (dilation[0] * (f_h - 1) + 1)
    d_block_w = (dilation[1] * (f_w - 1) + 1)

    input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], )),
                       mode='constant',
                       constant_values=0)

    # Scatter the filter into a zero block so that a dilated convolution
    # becomes a plain dense convolution with the enlarged filter.
    filter_dilation = np.zeros((out_c, f_c, d_block_h, d_block_w))
    filter_dilation[:, :, 0:d_block_h:dilation[0],
                    0:d_block_w:dilation[1]] = filter

    for i in range(out_h):
        for j in range(out_w):
            for g in range(group):
                input_pad_masked = \
                    input_pad[:, g * f_c:(g + 1) * f_c,
                    i * stride[0]:i * stride[0] + d_block_h,
                    j * stride[1]:j * stride[1] + d_block_w]

                f_sub = filter_dilation[g * sub_out_c:(g + 1) *
                                        sub_out_c, :, :, :]
                for k in range(sub_out_c):
                    out[:, g * sub_out_c + k, i, j] = \
                        np.sum(input_pad_masked * f_sub[k, :, :, :],
                               axis=(1, 2, 3))

    return out, in_n, out_h, out_w, out_c
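
# A minimal sanity check of the reference implementation (hypothetical
# usage, not part of the test suite): with a 1x1 filter, unit stride and
# no padding, the spatial size is preserved.
#
#   x = np.random.random((1, 3, 4, 4)).astype(np.float32)
#   w = np.random.random((2, 3, 1, 1)).astype(np.float32)
#   y, _, out_h, out_w, _ = conv2d_forward_naive(
#       x, w, 1, {'stride': [1, 1], 'pad': [0, 0], 'dilation': [1, 1]})
#   assert y.shape == (1, 2, 4, 4)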


class TestConv2dOp(OpTest):
    def setUp(self):
        self.op_type = "conv2d"
        self.use_cudnn = False
        self.exhaustive_search = False
        self.use_cuda = False
        self.use_mkldnn = False
        self.data_format = "AnyLayout"
        self.dtype = np.float32
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_test_case()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations
        }

        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.random(self.filter_size).astype(self.dtype)
        output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups,
                                                  conv2d_param)
        output = output.astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format,
            'exhaustive_search': self.exhaustive_search
        }
        self.outputs = {'Output': output}

    def has_cuda(self):
        # Helper, deliberately not prefixed with "test" so that unittest
        # does not collect it as a test case.
        return core.is_compiled_with_cuda() and (self.use_cudnn or
                                                 self.use_cuda)

    def test_check_output(self):
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_output_with_place(place, atol=1e-5)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02)

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place, ['Input'],
            'Output',
            max_relative_error=0.02,
            no_grad_set=set(['Filter']))

    def test_check_grad_no_input(self):
        if self.dtype == np.float16:
            return
        place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
        self.check_grad_with_place(
            place, ['Filter'],
            'Output',
            max_relative_error=0.02,
            no_grad_set=set(['Input']))

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass


class TestWithPad(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithStride(TestConv2dOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithGroup(TestConv2dOp):
    def init_group(self):
        self.groups = 3


class TestWith1x1(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithDilation(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    def init_group(self):
        self.groups = 3


class TestWithInput1x1Filter1x1(TestConv2dOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1]

    def init_group(self):
        self.groups = 3


#----------------Conv2dCUDNN----------------


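# Test-class factory: subclass an existing CPU case, flip use_cudnn on,
# and register the subclass in globals() so unittest discovery picks it up.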
def create_test_cudnn_class(parent):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
    TestCUDNNCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNCase


create_test_cudnn_class(TestConv2dOp)
create_test_cudnn_class(TestWithPad)
create_test_cudnn_class(TestWithStride)
create_test_cudnn_class(TestWithGroup)
create_test_cudnn_class(TestWith1x1)
create_test_cudnn_class(TestWithInput1x1Filter1x1)

#----------------Conv2dCUDNN fp16----------------


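# The FP16 variants run the same cases with float16 inputs and a looser
# tolerance (atol=2e-2); gradient checks only run when grad_check=True
# and the device reports float16 support.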
def create_test_cudnn_fp16_class(parent, grad_check=True):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestConv2DCUDNNFp16(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=2e-2)

        def test_check_grad_no_filter(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Input'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Filter']))

        def test_check_grad_no_input(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['Filter'],
                    'Output',
                    max_relative_error=0.02,
                    no_grad_set=set(['Input']))

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
    TestConv2DCUDNNFp16.__name__ = cls_name
    globals()[cls_name] = TestConv2DCUDNNFp16


create_test_cudnn_fp16_class(TestConv2dOp, grad_check=False)
create_test_cudnn_fp16_class(TestWithPad, grad_check=False)
create_test_cudnn_fp16_class(TestWithStride, grad_check=False)
create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
create_test_cudnn_fp16_class(TestWith1x1, grad_check=False)
create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)

# -------TestDepthwiseConv


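# depthwise_conv2d is checked against the same NumPy reference: with
# groups == in_channels, each input channel is convolved with its own
# set of filters.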
class TestDepthwiseConv(TestConv2dOp):
    def init_test_case(self):
        self.use_cuda = True
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [3, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestDepthwiseConv2(TestConv2dOp):
    def init_test_case(self):
        self.use_cuda = True
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [3, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestDepthwiseConv3(TestConv2dOp):
    def init_test_case(self):
        self.use_cuda = True
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestDepthwiseConvWithDilation(TestConv2dOp):
    def init_test_case(self):
        self.use_cuda = True
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        self.dilations = [2, 2]
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestDepthwiseConvWithDilation2(TestConv2dOp):
    def init_test_case(self):
        self.use_cuda = True
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        self.dilations = [2, 2]
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]
        self.op_type = "depthwise_conv2d"


class TestCUDNNExhaustiveSearch(TestConv2dOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.exhaustive_search = True


# Please don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated convolution.
# class TestCUDNNWithDilation(TestWithDilation):
#     def init_op_type(self):
#         self.op_type = "conv_cudnn"

if __name__ == '__main__':
    unittest.main()