#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = []

from paddle import _C_ops, in_dynamic_mode
from paddle.fluid.layers.utils import convert_to_list
from ...binary import add
from paddle.nn.functional.conv import _update_padding_nd
from paddle.fluid.layer_helper import LayerHelper


def _conv3d(x,
            weight,
            bias=None,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            subm=False,
            key=None,
            data_format="NDHWC",
            name=None):
    assert groups == 1, "Currently, only groups=1 is supported"

    dims = 3

    # Currently, only 'NDHWC' is supported
    if data_format not in ["NDHWC"]:
        raise ValueError("Attr(data_format) should be 'NDHWC'. Received "
                         "Attr(data_format): {}.".format(data_format))
    if len(x.shape) != 5:
        raise ValueError(
            "Input x should be 5D tensor, but received x with the shape of {}".
            format(x.shape))

    channel_last = (data_format == "NDHWC")
    channel_dim = -1 if channel_last else 1
    num_channels = x.shape[channel_dim]
    if num_channels < 0:
        raise ValueError(
            "The channel dimension of the input({}) should be defined. "
            "Received: {}.".format(x.shape, num_channels))

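    # Normalize padding, stride and dilation to the 3-D list forms expected by the sparse conv kernel.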
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
    stride = convert_to_list(stride, dims, 'stride')
    dilation = convert_to_list(dilation, dims, 'dilation')

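    # In dynamic (eager) mode, call the sparse conv3d C op directly and add the bias (if any).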
    if in_dynamic_mode():
        pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride,
                                        groups, subm,
                                        key if key is not None else "")
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
    else:
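        # In static graph mode, append a sparse_conv3d op to the program through
        # LayerHelper and return the output variable created for type inference.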
        inputs = {'x': x, 'kernel': weight}
        attrs = {
            'paddings': padding,
            'dilations': dilation,
            'strides': stride,
            'groups': groups,
            'subm': subm,
            'key': key
        }
        op_type = 'sparse_conv3d'
        helper = LayerHelper(op_type, **locals())
        rulebook = helper.create_variable_for_type_inference(dtype='int32',
                                                             stop_gradient=True)
        counter = helper.create_variable_for_type_inference(dtype='int32',
                                                            stop_gradient=True)
        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
        outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter}
        helper.append_op(type=op_type,
                         inputs=inputs,
                         outputs=outputs,
                         attrs=attrs)
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias


def conv3d(x,
           weight,
           bias=None,
           stride=1,
           padding=0,
           dilation=1,
           groups=1,
           data_format="NDHWC",
           name=None):
    r"""

    The sparse convolution3d functional calculates the output based on the input, filter,
    and the strides, paddings, dilations and groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, D, H, W, C]`, where N is the batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. If bias is provided, it is added to the
    output of the convolution.

    For each input :math:`X`, the equation is:

    ..  math::

        Out = W \ast X + b

    In the above equation:

    * :math:`X`: Input value, a tensor with NDHWC format.
    * :math:`W`: Filter value, a tensor with DHWCM format.
    * :math:`\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`

          Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`

        - Output:

          Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`

        Where

        ..  math::

            D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\
            H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
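
        For example, with an input of shape :math:`(1, 1, 3, 4, 1)` and a filter of
        shape :math:`(1, 3, 3, 1, 1)` (the shapes used in the Examples below), stride 1
        and padding 0 give

        ..  math::

            D_{out} = \frac{1 - 1}{1} + 1 = 1,\quad
            H_{out} = \frac{3 - 3}{1} + 1 = 1,\quad
            W_{out} = \frac{4 - 3}{1} + 1 = 2,

        so the output shape is :math:`(1, 1, 1, 2, 1)`.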

    Args:
        x (Tensor): The input is a 5-D SparseCooTensor with shape [N, D, H, W, C], the data
            type of the input is float16, float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
            where M is the number of filters (output channels), g is the number of groups,
            and kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor, optional): The bias, a Tensor of shape [M, ]. Currently, only bias=None is supported.
        stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, it can be either 'VALID' or
            'SAME', which is the padding algorithm. If padding size is a tuple or list,
            it can be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            When `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1. Currently, only groups=1 is supported.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            Currently, only `"NDHWC"` is supported. The default is `"NDHWC"`. When it is `"NDHWC"`, the data
            is stored in the order of: `[batch_size, input_depth, input_height, input_width, input_channels]`.
        name (str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name does not need to be set and
           it is None by default.

    Returns:
        A SparseCooTensor representing the conv3d result, whose data type is the same as the input.

    Examples:
        .. code-block:: python

            import paddle

            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
            values = [[1], [2], [3], [4]]
            indices = paddle.to_tensor(indices, dtype='int32')
            values = paddle.to_tensor(values, dtype='float32')
            dense_shape = [1, 1, 3, 4, 1]
            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
            y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
            print(y.shape)
            # (1, 1, 1, 2, 1)
    """
    return _conv3d(x, weight, bias, stride, padding, dilation, groups, False,
                   None, data_format, name)


def subm_conv3d(x,
                weight,
                bias=None,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                data_format="NDHWC",
                key=None,
                name=None):
    r"""

    The sparse submanifold convolution3d functional calculates the output based on the input, filter,
    and the strides, paddings, dilations and groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, D, H, W, C]`, where N is the batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. If bias is provided, it is added to the
    output of the convolution.

    For each input :math:`X`, the equation is:

    ..  math::

        Out = W \ast X + b

    In the above equation:

    * :math:`X`: Input value, a tensor with NDHWC format.
    * :math:`W`: Filter value, a tensor with DHWCM format.
    * :math:`\ast`: Submanifold convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`

          Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`

        - Output:

          Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`

        Where

        ..  math::

            D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\
            H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
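
        Note: a submanifold convolution evaluates the kernel only at the nonzero
        positions of the input, so with the default stride of 1 the output keeps the
        same spatial shape and sparsity pattern as the input (see the Examples below,
        where the output shape equals the input shape :math:`(1, 1, 3, 4, 1)`).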

    Args:
        x (Tensor): The input is a 5-D SparseCooTensor with shape [N, D, H, W, C], the data
            type of the input is float16, float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
            where M is the number of filters (output channels), g is the number of groups,
            and kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor, optional): The bias, a Tensor of shape [M, ]. Currently, only bias=None is supported.
        stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, it can be either 'VALID' or
            'SAME', which is the padding algorithm. If padding size is a tuple or list,
            it can be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            When `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Currently, only groups=1 is supported.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            Currently, only `"NDHWC"` is supported. The default is `"NDHWC"`. When it is `"NDHWC"`, the data
            is stored in the order of: `[batch_size, input_depth, input_height, input_width, input_channels]`.
        key (str, optional): The key is used to save or reuse the same rulebook;
            for the definition and role of the rulebook, refer to
            https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf.
            The default value is None.
        name (str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name does not need to be set and
           it is None by default.

    Returns:
        A SparseCooTensor representing the submanifold conv3d result, whose data type is
        the same as the input.

    Examples:
        .. code-block:: python

            import paddle

            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
            values = [[1], [2], [3], [4]]
            indices = paddle.to_tensor(indices, dtype='int32')
            values = paddle.to_tensor(values, dtype='float32')
            dense_shape = [1, 1, 3, 4, 1]
            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
            y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight)
            print(y.shape)
            # (1, 1, 3, 4, 1)
    """
    return _conv3d(x, weight, bias, stride, padding, dilation, groups, True,
                   key, data_format, name)