#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np

import paddle.v2.fluid.core as core
from op_test import OpTest


def conv3d_forward_naive(input, filter, group, conv_param):
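    """Naive NumPy reference implementation of 3-D convolution.

    `input` is laid out as NCDHW and `filter` as
    (out_channels, in_channels // group, f_d, f_h, f_w).  `conv_param`
    supplies 'stride', 'pad' and 'dilations', each a 3-element list for the
    D, H and W dimensions.  The result is the expected output of the conv3d
    operator.
    """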
    in_n, in_c, in_d, in_h, in_w = input.shape
    out_c, f_c, f_d, f_h, f_w = filter.shape
    assert f_c * group == in_c
    assert np.mod(out_c, group) == 0
    sub_out_c = out_c // group  # output channels per group

    stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
        'dilations']

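    # Standard convolution output size per spatial dimension:
    #   out = (in + 2 * pad - dilated_kernel_extent) // stride + 1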
    out_d = 1 + (in_d + 2 * pad[0] - (dilation[0] * (f_d - 1) + 1)) // stride[0]
    out_h = 1 + (in_h + 2 * pad[1] - (dilation[1] * (f_h - 1) + 1)) // stride[1]
    out_w = 1 + (in_w + 2 * pad[2] - (dilation[2] * (f_w - 1) + 1)) // stride[2]

    out = np.zeros((in_n, out_c, out_d, out_h, out_w))

    d_bolck_d = (dilation[0] * (f_d - 1) + 1)
    d_bolck_h = (dilation[1] * (f_h - 1) + 1)
    d_bolck_w = (dilation[2] * (f_w - 1) + 1)

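    # Zero-pad the three spatial axes; a one-element pad tuple pads both
    # sides of an axis by the same amount.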
    input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], ),
                               (pad[2], )),
                       mode='constant',
                       constant_values=0)

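    # Scatter the filter onto a dilated grid so that the plain element-wise
    # multiplication below implements dilated convolution.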
    filter_dilation = np.zeros((out_c, f_c, d_bolck_d, d_bolck_h, d_bolck_w))
    filter_dilation[:, :, 0:d_bolck_d:dilation[0], 0:d_bolck_h:dilation[1],
                    0:d_bolck_w:dilation[2]] = filter

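    # Slide the dilated filter over every output position and, for each
    # group, reduce the masked input window into the corresponding output
    # channels.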
    for d in range(out_d):
        for i in range(out_h):
            for j in range(out_w):
                for g in range(group):
                    input_pad_masked = \
                        input_pad[:, g * f_c:(g + 1) * f_c,
                        d * stride[0]:d * stride[0] + d_bolck_d,
                        i * stride[1]:i * stride[1] + d_bolck_h,
                        j * stride[2]:j * stride[2] + d_bolck_w]

                    f_sub = filter_dilation[g * sub_out_c:(g + 1) *
                                            sub_out_c, :, :, :, :]
                    for k in range(sub_out_c):
                        out[:, g * sub_out_c + k, d, i, j] = \
                            np.sum(input_pad_masked * f_sub[k, :, :, :, :],
                                   axis=(1, 2, 3, 4))

    return out


class TestConv3dOp(OpTest):
    def setUp(self):
        self.use_cudnn = False
        self.init_group()
        self.init_op_type()
        self.init_dilation()
        self.init_test_case()

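        # Parameters passed to the naive reference implementation above.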
        conv3d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilations': self.dilations,
            'use_cudnn': self.use_cudnn,
            'data_format': 'AnyLayout'  # TODO(dzhwinter): should be fixed later
        }
        input = np.random.random(self.input_size).astype("float32")
        filter = np.random.random(self.filter_size).astype("float32")
        output = conv3d_forward_naive(input, filter, self.groups,
                                      conv3d_param).astype("float32")

        self.inputs = {'Input': input, 'Filter': filter}
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations
        }
        self.outputs = {'Output': output}

    def test_check_output(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
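        # Compare analytic gradients w.r.t. Input and Filter against numeric
        # gradients within a 3% relative tolerance.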
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                set(['Input', 'Filter']),
                'Output',
                max_relative_error=0.03)
        else:
            self.check_grad(
                set(['Input', 'Filter']), 'Output', max_relative_error=0.03)

    def test_check_grad_no_filter(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']))
        else:
            self.check_grad(
                ['Input'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Filter']))

    def test_check_grad_no_input(self):
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']))
        else:
            self.check_grad(
                ['Filter'],
                'Output',
                max_relative_error=0.03,
                no_grad_set=set(['Input']))

    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3, 3]

    def init_dilation(self):
        self.dilations = [1, 1, 1]

    def init_group(self):
        self.groups = 1

    def init_op_type(self):
        self.op_type = "conv3d"


class TestCase1(TestConv3dOp):
    def init_test_case(self):
        self.pad = [1, 1, 1]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3, 3]


class TestWithGroup1(TestConv3dOp):
    def init_group(self):
        self.groups = 3


class TestWithGroup2(TestCase1):
    def init_group(self):
        self.groups = 3


class TestWith1x1(TestConv3dOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1, 1]

    def init_dilation(self):
        self.dilations = [1, 1, 1]

    def init_group(self):
        self.groups = 3


class TestWithDilation(TestConv3dOp):
    def init_test_case(self):
        self.pad = [0, 0, 0]
        self.stride = [1, 1, 1]
        self.input_size = [2, 3, 6, 6, 6]  # NCDHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 2, 2, 2]

    def init_dilation(self):
        self.dilations = [2, 2, 2]

    def init_group(self):
        self.groups = 3


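# The cuDNN variants below keep op_type == "conv3d" and only set use_cudnn,
# which routes the checks above through core.CUDAPlace(0).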
class TestCUDNN(TestConv3dOp):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d"


class TestWithGroup1CUDNN(TestWithGroup1):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d"


class TestWithGroup2CUDNN(TestWithGroup2):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d"


class TestWith1x1CUDNN(TestWith1x1):
    def init_op_type(self):
        self.use_cudnn = True
        self.op_type = "conv3d"


# FIXME(typhoonzero): find a way to determine if
# using cudnn > 6 in python
# class TestWithDilationCUDNN(TestWithDilation):
#     def init_op_type(self):
#         self.use_cudnn = True
#         self.op_type = "conv3d"

if __name__ == '__main__':
    unittest.main()