#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest


def conv3d_forward_naive(input, filter, group, conv_param):
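    """Naive, loop-based reference implementation of 3-D convolution.

    `input` is laid out as NCDHW and `filter` as
    (out_channels, in_channels // group, f_d, f_h, f_w). For example, a
    (2, 3, 4, 4, 4) input and a (6, 3, 3, 3, 3) filter with group=1, unit
    stride, zero padding and unit dilation produce a (2, 6, 2, 2, 2) output.
    """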
    in_n, in_c, in_d, in_h, in_w = input.shape
    out_c, f_c, f_d, f_h, f_w = filter.shape
    assert f_c * group == in_c
    assert np.mod(out_c, group) == 0
    sub_out_c = out_c // group

    stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
        'dilations']

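    # Standard output-size formula with padding and dilation:
    #   out = 1 + (in + 2 * pad - (dilation * (kernel - 1) + 1)) // stride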
    out_d = 1 + (in_d + 2 * pad[0] - (dilation[0] * (f_d - 1) + 1)) // stride[0]
    out_h = 1 + (in_h + 2 * pad[1] - (dilation[1] * (f_h - 1) + 1)) // stride[1]
    out_w = 1 + (in_w + 2 * pad[2] - (dilation[2] * (f_w - 1) + 1)) // stride[2]

    out = np.zeros((in_n, out_c, out_d, out_h, out_w))

    d_block_d = dilation[0] * (f_d - 1) + 1
    d_block_h = dilation[1] * (f_h - 1) + 1
    d_block_w = dilation[2] * (f_w - 1) + 1

    input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], ),
                               (pad[2], )),
                       mode='constant',
                       constant_values=0)

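    # Embed the filter into a zero tensor of the dilated kernel size so the
    # main loop below can use plain contiguous slices.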
    filter_dilation = np.zeros((out_c, f_c, d_block_d, d_block_h, d_block_w))
    filter_dilation[:, :, 0:d_block_d:dilation[0], 0:d_block_h:dilation[1],
                    0:d_block_w:dilation[2]] = filter

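    # Slide the (dilated) kernel over every output position, one group at a
    # time, and reduce over the input-channel and kernel axes.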
    for d in range(out_d):
        for i in range(out_h):
            for j in range(out_w):
                for g in range(group):
                    input_pad_masked = \
                        input_pad[:, g * f_c:(g + 1) * f_c,
                        d * stride[0]:d * stride[0] + d_block_d,
                        i * stride[1]:i * stride[1] + d_block_h,
                        j * stride[2]:j * stride[2] + d_block_w]

                    f_sub = filter_dilation[g * sub_out_c:(g + 1) *
                                            sub_out_c, :, :, :, :]
                    for k in range(sub_out_c):
                        out[:, g * sub_out_c + k, d, i, j] = \
                            np.sum(input_pad_masked * f_sub[k, :, :, :, :],
                                   axis=(1, 2, 3, 4))

    return out


class TestConv3dOp(OpTest):
    def setUp(self):
        self.op_type = "conv3d"
        self.use_cudnn = False
        self.use_mkldnn = False
        self.data_format = "AnyLayout"
        self.dtype = np.float32
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_test_case()

        conv3d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilations': self.dilations
        }

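        # Compute the expected output with the naive NumPy reference above;
        # the operator under test is checked against it.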
        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.random(self.filter_size).astype(self.dtype)
        output = conv3d_forward_naive(input, filter, self.groups,
                                      conv3d_param).astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'data_format': self.data_format
        }
        self.outputs = {'Output': output}

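    # Helper, not a test case: True when CUDA is available and this test is
    # configured to run the cuDNN kernel. (Named so that unittest does not
    # collect it as a test method.)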
    def has_cudnn(self):
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'Filter']),
                'Output',
                max_relative_error=0.03)
        else:
            self.check_grad(
                set(['Input', 'Filter']), 'Output', max_relative_error=0.03)

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']))
        else:
            self.check_grad(
                ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']))

    def test_check_grad_no_input(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']))
        else:
            self.check_grad(
                ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']))

    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3, 3]

    def init_dilation(self):
        self.dilations = [1, 1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass


class TestCase1(TestConv3dOp):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3, 3]


class TestWithGroup1(TestConv3dOp):
    def init_group(self):
        self.groups = 3


class TestWithGroup2(TestCase1):
    def init_group(self):
        self.groups = 3


class TestWith1x1(TestConv3dOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1, 1]

    def init_dilation(self):
        self.dilations = [1, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithInput1x1Filter1x1(TestConv3dOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 1, 1, 1]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1, 1]

    def init_dilation(self):
        self.dilations = [1, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithDilation(TestConv3dOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 6, 6, 6]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 2, 2, 2]

    def init_dilation(self):
        self.dilations = [2, 2, 2]

    def init_group(self):
        self.groups = 3


#----------------Conv3dCUDNN----------------
class TestCUDNN(TestConv3dOp):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16CUDNN(TestConv3dOp):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

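    # float16 outputs are compared with a looser absolute tolerance, and only
    # on devices that actually support float16.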
    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestWithGroup1CUDNN(TestWithGroup1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16WithGroup1CUDNN(TestWithGroup1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestWithGroup2CUDNN(TestWithGroup2):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16WithGroup2CUDNN(TestWithGroup2):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestWith1x1CUDNN(TestWith1x1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16With1x1CUDNN(TestWith1x1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
    def init_kernel_type(self):
        self.use_cudnn = True


class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


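# exhaustive_search is meant to let the cuDNN kernel benchmark candidate
# algorithms and pick the fastest one.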
class TestCUDNNExhaustiveSearch(TestCUDNN):
    def init_kernel_type(self):
        self.use_cudnn = True
        self.exhaustive_search = True


# FIXME(typhoonzero): find a way to determine from Python
# whether cuDNN > 6 is in use
# class TestWithDilationCUDNN(TestWithDilation):
#     def init_op_type(self):
#         self.op_type = "conv3d"

if __name__ == '__main__':
    unittest.main()