Unverified  Commit 61953b90  authored by zhangkaihuo, committed by GitHub

[cherry-pick] Fix English documents of sparse API (#47496)

Fix English documents of sparse API
Parent 601626ac
@@ -241,11 +241,9 @@ def add(x, y, name=None):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard
             paddle.device.set_device("cpu")
-            with _test_eager_guard():
-                x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
-                y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
-                sparse_x = x.to_sparse_csr()
+            x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
+            y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
+            sparse_x = x.to_sparse_csr()
@@ -268,10 +266,9 @@ def add(x, y, name=None):
         inputs = {'x': x, 'y': y}
         helper = LayerHelper(op_type)
         out = helper.create_sparse_variable_for_type_inference(x.dtype)
-        helper.append_op(type=op_type,
-                         inputs=inputs,
-                         outputs={'out': out},
-                         attrs={})
+        helper.append_op(
+            type=op_type, inputs=inputs, outputs={'out': out}, attrs={}
+        )
         return out
@@ -298,11 +295,9 @@ def subtract(x, y, name=None):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard
             paddle.device.set_device("cpu")
-            with _test_eager_guard():
-                x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
-                y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
-                sparse_x = x.to_sparse_csr()
+            x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
+            y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
+            sparse_x = x.to_sparse_csr()
@@ -343,11 +338,9 @@ def multiply(x, y, name=None):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard
             paddle.device.set_device("cpu")
-            with _test_eager_guard():
-                x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
-                y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
-                sparse_x = x.to_sparse_csr()
+            x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
+            y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
+            sparse_x = x.to_sparse_csr()
@@ -391,11 +384,9 @@ def divide(x, y, name=None):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard
             paddle.device.set_device("cpu")
-            with _test_eager_guard():
-                x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
-                y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
-                sparse_x = x.to_sparse_csr()
+            x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
+            y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
+            sparse_x = x.to_sparse_csr()
......
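Taken together, the four hunks above leave the elementwise examples in the shape below. A minimal runnable sketch, assuming Paddle >= 2.4 with eager mode as the default (the `paddle.sparse.add` path and the printed result are inferred from the docstrings, not part of the diff):

    import paddle

    paddle.device.set_device("cpu")

    x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
    y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
    sparse_x = x.to_sparse_csr()
    sparse_y = y.to_sparse_csr()

    # elementwise add of two SparseCsrTensor inputs; subtract, multiply
    # and divide follow the same pattern
    out = paddle.sparse.add(sparse_x, sparse_y)
    print(out.to_dense())
    # [[ 0., -1.,  0.,  0.],
    #  [ 0.,  2., -6.,  0.],
    #  [ 6.,  8.,  4.,  8.]]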
@@ -17,7 +17,12 @@ from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.framework import core, dygraph_only
 from paddle.fluid.framework import _current_expected_place, _get_paddle_place
 from paddle.tensor import to_tensor, max
-from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
+from paddle.fluid.data_feeder import (
+    check_variable_and_dtype,
+    check_type,
+    check_dtype,
+    convert_dtype,
+)
 from paddle import in_dynamic_mode
 from paddle.fluid.layer_helper import LayerHelper
@@ -51,8 +56,8 @@ def _get_place(place):
     if place is None:
         place = _current_expected_place()
     elif not isinstance(
-            place,
-        (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)):
+        place, (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)
+    ):
         raise ValueError(
             "'place' must be any of paddle.Place, paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace"
         )
@@ -66,12 +71,9 @@ def _check_indices_dtype(dtype):
     )


-def sparse_coo_tensor(indices,
-                      values,
-                      shape=None,
-                      dtype=None,
-                      place=None,
-                      stop_gradient=True):
+def sparse_coo_tensor(
+    indices, values, shape=None, dtype=None, place=None, stop_gradient=True
+):
     r"""
     Constructs a sparse ``paddle.Tensor`` in coordinate format according to the indices
     and values of the specified non-zero elements.
@@ -107,9 +109,7 @@ def sparse_coo_tensor(indices,
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                indices = [[0, 1, 2], [1, 2, 0]]
-                values = [1.0, 2.0, 3.0]
-                dense_shape = [3, 3]
+            indices = [[0, 1, 2], [1, 2, 0]]
+            values = [1.0, 2.0, 3.0]
+            dense_shape = [3, 3]
@@ -125,10 +125,9 @@ def sparse_coo_tensor(indices,
     place = _get_place(place)

     if not isinstance(indices, core.eager.Tensor):
-        indices = to_tensor(indices,
-                            dtype=None,
-                            place=place,
-                            stop_gradient=True)
+        indices = to_tensor(
+            indices, dtype=None, place=place, stop_gradient=True
+        )
     if not isinstance(values, core.eager.Tensor):
         values = to_tensor(values, dtype, place, stop_gradient)
     if len(indices.shape) != 2:
@@ -141,8 +140,10 @@ def sparse_coo_tensor(indices,
     if nnz != values.shape[0]:
         raise ValueError(
-            "the indices and values must have same number of non-zero, but get {} and {}"
-            .format(nnz, values.shape[0]))
+            "the indices and values must have same number of non-zero, but get {} and {}".format(
+                nnz, values.shape[0]
+            )
+        )

     dense_dim = len(values.shape) - 1
@@ -162,11 +163,15 @@ def sparse_coo_tensor(indices,
         if shape < min_shape:
             raise ValueError(
-                "the minimun shape required is {}, but get {}".format(
-                    min_shape, shape))
+                "the minimun shape required is {}, but get {}".format(
+                    min_shape, shape
+                )
+            )
         if len(shape) != sparse_dim + dense_dim:
             raise ValueError(
-                "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}"
-                .format(sparse_dim, dense_dim, len(shape)))
+                "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}".format(
+                    sparse_dim, dense_dim, len(shape)
+                )
+            )

         return _C_ops.sparse_sparse_coo_tensor(values, indices, shape)
@@ -178,22 +183,17 @@ def sparse_coo_tensor(indices,
         attrs = {'dense_shape': shape}
         helper = LayerHelper(op_type)
         out = helper.create_sparse_variable_for_type_inference(dtype)
-        helper.append_op(type=op_type,
-                         inputs=inputs,
-                         outputs={'out': out},
-                         attrs=attrs)
+        helper.append_op(
+            type=op_type, inputs=inputs, outputs={'out': out}, attrs=attrs
+        )
         return out


-#TODO: need to support shape is None
+# TODO: need to support shape is None
 @dygraph_only
-def sparse_csr_tensor(crows,
-                      cols,
-                      values,
-                      shape,
-                      dtype=None,
-                      place=None,
-                      stop_gradient=True):
+def sparse_csr_tensor(
+    crows, cols, values, shape, dtype=None, place=None, stop_gradient=True
+):
     r"""
     Constructs a sparse ``paddle.Tensor`` in CSR(Compressed Sparse Row) format according to the
     ``crows``, ``cols`` and ``values``.
@@ -233,9 +233,7 @@ def sparse_csr_tensor(crows,
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                crows = [0, 2, 3, 5]
-                cols = [1, 3, 2, 0, 1]
-                values = [1, 2, 3, 4, 5]
+            crows = [0, 2, 3, 5]
+            cols = [1, 3, 2, 0, 1]
+            values = [1, 2, 3, 4, 5]
@@ -262,8 +260,10 @@ def sparse_csr_tensor(crows,
     if len(shape) != 2 and len(shape) != 3:
         raise ValueError(
-            "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}".
-            format(shape))
+            "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}".format(
+                shape
+            )
+        )
     rows = shape[len(shape) - 2]

     if not crows.place._equals(place):
@@ -280,26 +280,32 @@ def sparse_csr_tensor(crows,
     if len(crows.shape) != 1 or len(cols.shape) != 1 or len(values.shape) != 1:
         raise ValueError("The 'crows', 'cols' and 'values' must be 1-D.")

-    if (len(cols) != len(values)):
+    if len(cols) != len(values):
         raise ValueError("the length of cols must be same as length of values")

     if len(shape) == 2:
         if crows.shape[0] != rows + 1:
             raise ValueError(
-                "The length({}) of crows must be equal to the rows({})+1 of matrix."
-                .format(crows.shape[0], rows))
+                "The length({}) of crows must be equal to the rows({})+1 of matrix.".format(
+                    crows.shape[0], rows
+                )
+            )
         if crows[0] != 0:
             raise ValueError("the 0th value of crows must be 0")

         if crows[-1] != values.shape[0]:
             raise ValueError(
-                "the last value of crows must be equal the number of non-zero")
+                "the last value of crows must be equal the number of non-zero"
+            )
     else:
         if crows.shape[0] % (rows + 1) != 0:
             raise ValueError(
-                "The length({}) of crows must be divisible the rows({})+1 of matrix."
-                .format(crows.shape[0], rows))
+                "The length({}) of crows must be divisible the rows({})+1 of matrix.".format(
+                    crows.shape[0], rows
+                )
+            )
     # TODO(zkh2016): check whether the value in crows and cols is legal
-    return core.eager.sparse_csr_tensor(crows, cols, values, shape,
-                                        stop_gradient)
+    return core.eager.sparse_csr_tensor(
+        crows, cols, values, shape, stop_gradient
+    )
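For orientation, the two constructors validated above can be exercised with the docstring data. A sketch assuming Paddle >= 2.4; the [3, 4] CSR shape is an assumption chosen to satisfy the checks shown above (crows has rows + 1 entries and crows[-1] equals the number of non-zeros):

    import paddle

    # COO: non-zeros at (0, 1), (1, 2) and (2, 0) of a 3x3 matrix
    indices = [[0, 1, 2], [1, 2, 0]]
    values = [1.0, 2.0, 3.0]
    coo = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])

    # CSR: crows[i]:crows[i+1] slices cols/values for row i
    crows = [0, 2, 3, 5]
    cols = [1, 3, 2, 0, 1]
    values = [1.0, 2.0, 3.0, 4.0, 5.0]
    csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 4])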
@@ -23,7 +23,8 @@ from paddle.nn.functional.conv import _update_padding_nd
 from paddle.fluid.layer_helper import LayerHelper


-def _conv3d(x,
+def _conv3d(
+    x,
     weight,
     bias=None,
     stride=1,
@@ -33,40 +34,55 @@ def _conv3d(x,
     subm=False,
     key=None,
     data_format="NDHWC",
-    name=None):
+    name=None,
+):
     assert groups == 1, "Currently, only support groups=1"

     dims = 3

     # Currently, only support 'NDHWC'
     if data_format not in ["NDHWC"]:
-        raise ValueError("Attr(data_format) should be 'NDHWC'. Received "
-                         "Attr(data_format): {}.".format(data_format))
+        raise ValueError(
+            "Attr(data_format) should be 'NDHWC'. Received "
+            "Attr(data_format): {}.".format(data_format)
+        )
     if len(x.shape) != 5:
         raise ValueError(
-            "Input x should be 5D tensor, but received x with the shape of {}".
-            format(x.shape))
+            "Input x should be 5D tensor, but received x with the shape of {}".format(
+                x.shape
+            )
+        )

-    channel_last = (data_format == "NDHWC")
+    channel_last = data_format == "NDHWC"
     channel_dim = -1 if channel_last else 1
     if len(x.shape) != 5:
         raise ValueError(
-            "Input x should be 5D tensor, but received x with the shape of {}".
-            format(x.shape))
+            "Input x should be 5D tensor, but received x with the shape of {}".format(
+                x.shape
+            )
+        )
     num_channels = x.shape[channel_dim]
     if num_channels < 0:
         raise ValueError(
             "The channel dimension of the input({}) should be defined. "
-            "Received: {}.".format(x.shape, num_channels))
+            "Received: {}.".format(x.shape, num_channels)
+        )

     padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
     stride = convert_to_list(stride, dims, 'stride')
     dilation = convert_to_list(dilation, dims, 'dilation')

     if in_dynamic_mode():
-        pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride,
-                                        groups, subm,
-                                        key if key is not None else "")
+        pre_bias = _C_ops.sparse_conv3d(
+            x,
+            weight,
+            padding,
+            dilation,
+            stride,
+            groups,
+            subm,
+            key if key is not None else "",
+        )
         if bias is not None:
             return add(pre_bias, bias)
         else:
@@ -79,27 +95,29 @@ def _conv3d(x,
             'strides': stride,
             'groups': groups,
             'subm': subm,
-            'key': key
+            'key': key,
         }
         op_type = 'sparse_conv3d'
         helper = LayerHelper(op_type, **locals())
-        rulebook = helper.create_variable_for_type_inference(dtype='int32',
-                                                             stop_gradient=True)
-        counter = helper.create_variable_for_type_inference(dtype='int32',
-                                                            stop_gradient=True)
+        rulebook = helper.create_variable_for_type_inference(
+            dtype='int32', stop_gradient=True
+        )
+        counter = helper.create_variable_for_type_inference(
+            dtype='int32', stop_gradient=True
+        )
         pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
         outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter}
-        helper.append_op(type=op_type,
-                         inputs=inputs,
-                         outputs=outputs,
-                         attrs=attrs)
+        helper.append_op(
+            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
+        )
         if bias is not None:
             return add(pre_bias, bias)
         else:
             return pre_bias


-def conv3d(x,
+def conv3d(
+    x,
     weight,
     bias=None,
     stride=1,
@@ -107,7 +125,8 @@ def conv3d(x,
     dilation=1,
     groups=1,
     data_format="NDHWC",
-    name=None):
+    name=None,
+):
     r"""
     The sparse convolution3d functional calculates the output based on the input, filter
@@ -132,36 +151,17 @@ def conv3d(x,
     * :math:`b`: Bias value, a 1-D tensor with shape [M].
     * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

-    Example:
-        - Input:
-            Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`
-            Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`
-        - Output:
-            Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`
-        Where
-        .. math::
-            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
-            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
-            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
-
     Args:
         x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
             type of input is float16 or float32 or float64.
         weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
             where M is the number of filters(output channels), g is the number of groups,
             kD, kH, kW are the filter's depth, height and width respectively.
-        bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None.
-        stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
+        bias (Tensor, optional): The bias, a Tensor of shape [M].
+        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
             list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
             Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
-        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
+        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
             on both sides for each dimension. If `padding` is a string, either 'VALID' or
             'SAME' which is the padding algorithm. If padding size is a tuple or list,
             it could be in three forms: `[pad_depth, pad_height, pad_width]` or
@@ -171,11 +171,11 @@ def conv3d(x,
             when `data_format` is `"NDHWC"`, `padding` can be in the form
             `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
             Default: padding = 0.
-        dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
+        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
             If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
             dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
             Default: dilation = 1.
-        groups (int): The groups number of the Conv3D Layer. According to grouped
+        groups (int, optional): The groups number of the Conv3D Layer. According to grouped
             convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
             the first half of the filters is only connected to the first half
             of the input channels, while the second half of the filters is only
@@ -184,7 +184,7 @@ def conv3d(x,
             will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
             The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of:
             `[batch_size, input_depth, input_height, input_width, input_channels]`.
-        name(str|None): For detailed information, please refer
+        name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
@@ -195,9 +195,7 @@ def conv3d(x,
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
-                values = [[1], [2], [3], [4]]
-                indices = paddle.to_tensor(indices, dtype='int32')
+            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+            values = [[1], [2], [3], [4]]
+            indices = paddle.to_tensor(indices, dtype='int32')
@@ -209,11 +207,23 @@ def conv3d(x,
             print(y.shape)
             # (1, 1, 1, 2, 1)
     """
-    return _conv3d(x, weight, bias, stride, padding, dilation, groups, False,
-                   None, data_format, name)
+    return _conv3d(
+        x,
+        weight,
+        bias,
+        stride,
+        padding,
+        dilation,
+        groups,
+        False,
+        None,
+        data_format,
+        name,
+    )


-def subm_conv3d(x,
+def subm_conv3d(
+    x,
     weight,
     bias=None,
     stride=1,
@@ -222,7 +232,8 @@ def subm_conv3d(x,
     groups=1,
     data_format="NDHWC",
     key=None,
-    name=None):
+    name=None,
+):
     r"""
     The sparse submanifold convolution3d functional calculates the output based on the input, filter
@@ -247,33 +258,14 @@ def subm_conv3d(x,
     * :math:`b`: Bias value, a 1-D tensor with shape [M].
     * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

-    Example:
-        - Input:
-            Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`
-            Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`
-        - Output:
-            Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`
-        Where
-        .. math::
-            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
-            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
-            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
-
     Args:
         x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
             type of input is float16 or float32 or float64.
         weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
             where M is the number of filters(output channels), g is the number of groups,
             kD, kH, kW are the filter's depth, height and width respectively.
-        bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None.
-        stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
+        bias (Tensor, optional): The bias, a Tensor of shape [M].
+        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
             list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
             Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
         padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
@@ -286,11 +278,11 @@ def subm_conv3d(x,
             when `data_format` is `"NHWC"`, `padding` can be in the form
             `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
             Default: padding = 0.
-        dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
+        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
             If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
             dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
             Default: dilation = 1.
-        groups (int): The groups number of the Conv3D Layer. According to grouped
+        groups (int, optional): The groups number of the Conv3D Layer. According to grouped
             convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
             the first half of the filters is only connected to the first half
             of the input channels, while the second half of the filters is only
@@ -303,7 +295,7 @@ def subm_conv3d(x,
             the definition and role of rulebook refers to
             https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
             default value is None.
-        name(str|None): For detailed information, please refer
+        name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
@@ -315,9 +307,7 @@ def subm_conv3d(x,
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
-                values = [[1], [2], [3], [4]]
-                indices = paddle.to_tensor(indices, dtype='int32')
+            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+            values = [[1], [2], [3], [4]]
+            indices = paddle.to_tensor(indices, dtype='int32')
@@ -329,5 +319,16 @@ def subm_conv3d(x,
             print(y.shape)
             #(1, 1, 3, 4, 1)
     """
-    return _conv3d(x, weight, bias, stride, padding, dilation, groups, True,
-                   key, data_format, name)
+    return _conv3d(
+        x,
+        weight,
+        bias,
+        stride,
+        padding,
+        dilation,
+        groups,
+        True,
+        key,
+        data_format,
+        name,
+    )
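The conv3d docstring example, with the guard removed, reduces to the sketch below. The weight shape [1, 3, 3, 1, 1] is an assumption chosen so that a valid (unpadded) convolution over a [1, 1, 3, 4, 1] input yields the (1, 1, 1, 2, 1) output quoted in the docstring; module paths assume Paddle >= 2.4:

    import paddle

    indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
    values = [[1.0], [2.0], [3.0], [4.0]]
    indices = paddle.to_tensor(indices, dtype='int32')
    values = paddle.to_tensor(values, dtype='float32')
    dense_shape = [1, 1, 3, 4, 1]  # [N, D, H, W, C]
    sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)

    # kernel layout is [kD, kH, kW, C/g, M]
    weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
    y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
    print(y.shape)  # [1, 1, 1, 2, 1]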
@@ -19,13 +19,15 @@ from paddle.nn.functional.pooling import _update_padding_nd
 __all__ = []


-def max_pool3d(x,
+def max_pool3d(
+    x,
     kernel_size,
     stride=None,
     padding=0,
     ceil_mode=False,
     data_format="NDHWC",
-    name=None):
+    name=None,
+):
     """
     Implements sparse max pooling 3d operation.
     See more details in :ref:`api_sparse_pooling_MaxPool3d` .
@@ -37,18 +39,18 @@ def max_pool3d(x,
             is a tuple or list, it must contain three integers,
             (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
             Otherwise, the pool kernel size will be the cube of an int.
-        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
+        stride (int|list|tuple, optional): The pool stride size. If pool stride size is a tuple or list,
             it must contain three integers, [stride_Depth, stride_Height, stride_Width).
             Otherwise, the pool stride size will be a cube of an int.
-        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
+        padding (string|int|list|tuple, optional): The padding size. Padding could be in one of the following forms.
             1. A string in ['valid', 'same'].
             2. An int, which means the feature map is zero padded by size of `padding` on every sides.
             3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_weight] whose value means the padding size of each dimension.
             4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
             5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
             The default value is 0.
-        ceil_mode (bool): ${ceil_mode_comment}
-        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
+        ceil_mode (bool, optional): ${ceil_mode_comment}
+        data_format (string, optional): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
             The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
             `[batch_size, input_channels, input_depth, input_height, input_width]`. Currently only support `"NDHWC"` .
         name(str, optional): For detailed information, please refer
@@ -62,9 +64,7 @@ def max_pool3d(x,
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                dense_x = paddle.randn((1, 4, 4, 4, 3))
-                sparse_x = dense_x.to_sparse_coo(4)
-                kernel_sizes = [3, 3, 3]
+            dense_x = paddle.randn((1, 4, 4, 4, 3))
+            sparse_x = dense_x.to_sparse_coo(4)
+            kernel_sizes = [3, 3, 3]
@@ -75,9 +75,12 @@ def max_pool3d(x,
     """

     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-    assert x.is_sparse_coo(
-    ), "Currently, sparse.relu only support the input of SparseCooTensor"
-    assert data_format == 'NDHWC', "Currently, sparse.max_pool3d only support data format of 'NDHWC'"
+    assert (
+        x.is_sparse_coo()
+    ), "Currently, sparse.relu only support the input of SparseCooTensor"
+    assert (
+        data_format == 'NDHWC'
+    ), "Currently, sparse.max_pool3d only support data format of 'NDHWC'"

     kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
     if stride is None:
@@ -87,12 +90,11 @@ def max_pool3d(x,
     channel_last = True

-    padding, padding_algorithm = _update_padding_nd(padding,
-                                                    3,
-                                                    channel_last=channel_last,
-                                                    ceil_mode=ceil_mode)
+    padding, padding_algorithm = _update_padding_nd(
+        padding, 3, channel_last=channel_last, ceil_mode=ceil_mode
+    )

-    #TODO(zkh2016): remove the dependency on dilation from the backend
+    # TODO(zkh2016): remove the dependency on dilation from the backend
     dilation = [1, 1, 1]

     return _C_ops.sparse_maxpool(x, kernel_size, padding, dilation, stride)
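The functional pooling example then runs as below; with kernel 3, stride 1 and no padding, each spatial extent of 4 shrinks to 2. A sketch assuming Paddle >= 2.4 (the explicit stride and padding arguments are added here for illustration):

    import paddle

    dense_x = paddle.randn((1, 4, 4, 4, 3))  # NDHWC
    sparse_x = dense_x.to_sparse_coo(4)      # 4 sparse dims, channels stay dense
    kernel_sizes = [3, 3, 3]
    out = paddle.sparse.nn.functional.max_pool3d(
        sparse_x, kernel_sizes, stride=1, padding=0
    )
    print(out.shape)  # [1, 2, 2, 2, 3]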
@@ -23,8 +23,8 @@ __all__ = []


 class _Conv3D(Layer):
-    def __init__(self,
+    def __init__(
+        self,
         in_channels,
         out_channels,
         kernel_size,
@@ -37,9 +37,12 @@ class _Conv3D(Layer):
         padding_mode='zeros',
         weight_attr=None,
         bias_attr=None,
-        data_format="NDHWC"):
+        data_format="NDHWC",
+    ):
         super(_Conv3D, self).__init__()
-        assert weight_attr is not False, "weight_attr should not be False in Conv."
+        assert (
+            weight_attr is not False
+        ), "weight_attr should not be False in Conv."
         self._param_attr = weight_attr
         self._bias_attr = bias_attr
         self._groups = groups
@@ -49,47 +52,56 @@ class _Conv3D(Layer):
         self._subm = subm
         self._key = key

-        assert padding_mode == 'zeros', "Currently, only support padding_mode='zeros'"
+        assert (
+            padding_mode == 'zeros'
+        ), "Currently, only support padding_mode='zeros'"
         assert groups == 1, "Currently, only support groups=1"

         valid_format = {'NDHWC'}
         if data_format not in valid_format:
             raise ValueError(
-                "data_format must be one of {}, but got data_format='{}'".
-                format(valid_format, data_format))
+                "data_format must be one of {}, but got data_format='{}'".format(
+                    valid_format, data_format
+                )
+            )

         channel_last = data_format == "NDHWC"

         dims = 3
         self._stride = utils.convert_to_list(stride, dims, 'stride')
         self._dilation = utils.convert_to_list(dilation, dims, 'dilation')
-        self._kernel_size = utils.convert_to_list(kernel_size, dims,
-                                                  'kernel_size')
+        self._kernel_size = utils.convert_to_list(
+            kernel_size, dims, 'kernel_size'
+        )
         self._padding = padding
         self._padding_mode = padding_mode
         self._updated_padding, self._padding_algorithm = _update_padding_nd(
-            padding, channel_last, dims)
+            padding, channel_last, dims
+        )

         # the sparse conv restricts the shape is [D, H, W, in_channels, out_channels]
         filter_shape = self._kernel_size + [
-            self._in_channels, self._out_channels
+            self._in_channels,
+            self._out_channels,
         ]

         def _get_default_param_initializer():
             filter_elem_num = np.prod(self._kernel_size) * self._in_channels
-            std = (2.0 / filter_elem_num)**0.5
+            std = (2.0 / filter_elem_num) ** 0.5
             return Normal(0.0, std)

         self.weight = self.create_parameter(
             shape=filter_shape,
             attr=self._param_attr,
-            default_initializer=_get_default_param_initializer())
-        self.bias = self.create_parameter(attr=self._bias_attr,
-                                          shape=[self._out_channels],
-                                          is_bias=True)
+            default_initializer=_get_default_param_initializer(),
+        )
+        self.bias = self.create_parameter(
+            attr=self._bias_attr, shape=[self._out_channels], is_bias=True
+        )

     def forward(self, x):
-        out = F.conv._conv3d(x,
+        out = F.conv._conv3d(
+            x,
             self.weight,
             bias=self.bias,
             stride=self._stride,
@@ -98,7 +110,8 @@ class _Conv3D(Layer):
             groups=self._groups,
             subm=self._subm,
             key=self._key,
-            data_format=self._data_format)
+            data_format=self._data_format,
+        )
         return out

     def extra_repr(self):
@@ -208,9 +221,7 @@ class Conv3D(_Conv3D):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
-                values = [[1], [2], [3], [4]]
-                indices = paddle.to_tensor(indices, dtype='int32')
+            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+            values = [[1], [2], [3], [4]]
+            indices = paddle.to_tensor(indices, dtype='int32')
@@ -223,7 +234,8 @@ class Conv3D(_Conv3D):
             # (1, 1, 1, 2, 1)
     """

-    def __init__(self,
+    def __init__(
+        self,
         in_channels,
         out_channels,
         kernel_size,
@@ -234,8 +246,10 @@ class Conv3D(_Conv3D):
         padding_mode='zeros',
         weight_attr=None,
         bias_attr=None,
-        data_format="NDHWC"):
-        super(Conv3D, self).__init__(in_channels,
+        data_format="NDHWC",
+    ):
+        super(Conv3D, self).__init__(
+            in_channels,
             out_channels,
             kernel_size,
             stride=stride,
@@ -247,13 +261,14 @@ class Conv3D(_Conv3D):
             padding_mode=padding_mode,
             weight_attr=weight_attr,
             bias_attr=bias_attr,
-            data_format=data_format)
+            data_format=data_format,
+        )


 class SubmConv3D(_Conv3D):
     r"""
-    **Sparse Submanifold Convlution3d Layer**
+    **Submanifold Sparse Convlution3d Layer**

-    The Sparse submanifold convolution3d layer calculates the output based on the input, filter
+    The submanifold sparse convolution3d layer calculates the output based on the input, filter
     and strides, paddings, dilations, groups parameters. Input(Input) and
     Output(Output) are multidimensional SparseCooTensors with a shape of
     :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
@@ -264,7 +279,7 @@ class SubmConv3D(_Conv3D):
     .. math::

-        Out =(W \ast X + b
+        Out = W \ast X + b

     In the above equation:
@@ -345,9 +360,7 @@ class SubmConv3D(_Conv3D):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
-                values = [[1], [2], [3], [4]]
-                dense_shape = [1, 1, 3, 4, 1]
+            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+            values = [[1], [2], [3], [4]]
+            dense_shape = [1, 1, 3, 4, 1]
@@ -360,7 +373,8 @@ class SubmConv3D(_Conv3D):
             # (1, 1, 3, 4, 1)
     """

-    def __init__(self,
+    def __init__(
+        self,
         in_channels,
         out_channels,
         kernel_size,
@@ -372,8 +386,10 @@ class SubmConv3D(_Conv3D):
         key=None,
         weight_attr=None,
         bias_attr=None,
-        data_format="NDHWC"):
-        super(SubmConv3D, self).__init__(in_channels,
+        data_format="NDHWC",
+    ):
+        super(SubmConv3D, self).__init__(
+            in_channels,
             out_channels,
             kernel_size,
             stride=stride,
@@ -385,4 +401,5 @@ class SubmConv3D(_Conv3D):
             padding_mode=padding_mode,
             weight_attr=weight_attr,
             bias_attr=bias_attr,
-            data_format=data_format)
+            data_format=data_format,
+        )
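The SubmConv3D docstring example now reads as follows. A sketch assuming Paddle >= 2.4; because submanifold convolution only computes outputs at the input's non-zero sites, the spatial shape is preserved:

    import paddle

    indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
    values = [[1.0], [2.0], [3.0], [4.0]]
    indices = paddle.to_tensor(indices, dtype='int32')
    values = paddle.to_tensor(values, dtype='float32')
    dense_shape = [1, 1, 3, 4, 1]
    sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)

    # in_channels=1, out_channels=1, kernel_size=(1, 3, 3)
    subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
    y = subm_conv(sparse_x)
    print(y.shape)  # [1, 1, 3, 4, 1]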
@@ -83,9 +83,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                paddle.seed(123)
-                channels = 3
-                x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
+            paddle.seed(123)
+            channels = 3
+            x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
@@ -271,6 +269,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.
+        data_format(str, optional): Specify the input data format, may be "NCHW". Default "NCHW".
+        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..

     Shapes:
         input: Tensor that the dimension from 2 to 5.
@@ -283,10 +283,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
             # required: gpu
             import paddle
             import paddle.sparse.nn as nn
-            import numpy as np

-            x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]], dtype='float32')
             x = x.to_sparse_coo(len(x.shape)-1)

             if paddle.is_compiled_with_cuda():
......
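The sparse BatchNorm example, minus the guard, becomes the sketch below (assuming Paddle >= 2.4; BatchNorm here normalizes the dense channel axis of an NDHWC SparseCooTensor, and the printed shape is inferred):

    import paddle

    paddle.seed(123)
    channels = 3
    x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
    sparse_x = x_data.to_sparse_coo(4)
    batch_norm = paddle.sparse.nn.BatchNorm(channels)
    out = batch_norm(sparse_x)
    print(out.shape)  # [1, 6, 6, 6, 3]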
@@ -61,9 +61,7 @@ class MaxPool3D(Layer):
         .. code-block:: python

             import paddle
-            from paddle.fluid.framework import _test_eager_guard

-            with _test_eager_guard():
-                dense_x = paddle.randn((2, 3, 6, 6, 3))
-                sparse_x = dense_x.to_sparse_coo(4)
-                max_pool3d = paddle.sparse.nn.MaxPool3D(
+            dense_x = paddle.randn((2, 3, 6, 6, 3))
+            sparse_x = dense_x.to_sparse_coo(4)
+            max_pool3d = paddle.sparse.nn.MaxPool3D(
@@ -73,14 +71,16 @@ class MaxPool3D(Layer):
     """

-    def __init__(self,
+    def __init__(
+        self,
         kernel_size,
         stride=None,
         padding=0,
         return_mask=False,
         ceil_mode=False,
         data_format="NDHWC",
-        name=None):
+        name=None,
+    ):
         super(MaxPool3D, self).__init__()
         self.ksize = kernel_size
         self.stride = stride
@@ -91,14 +91,17 @@ class MaxPool3D(Layer):
         self.name = name

     def forward(self, x):
-        return F.max_pool3d(x,
+        return F.max_pool3d(
+            x,
             kernel_size=self.ksize,
             stride=self.stride,
             padding=self.padding,
             ceil_mode=self.ceil_mode,
             data_format=self.data_format,
-            name=self.name)
+            name=self.name,
+        )

     def extra_repr(self):
         return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format(
-            **self.__dict__)
+            **self.__dict__
+        )
@@ -15,7 +15,11 @@
 import numpy as np

 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import dygraph_only, core, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import (
+    dygraph_only,
+    core,
+    convert_np_dtype_to_dtype_,
+)

 __all__ = []
@@ -517,12 +521,14 @@ def abs(x, name=None):
 @dygraph_only
-def coalesce(x):
+def coalesce(x, name=None):
     r"""
     the coalesced operator include sorted and merge, after coalesced, the indices of x is sorted and unique.

     Parameters:
         x (Tensor): the input SparseCooTensor.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         Tensor: return the SparseCooTensor after coalesced.
......
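Since coalesce only gained a name parameter above, its behaviour is unchanged: duplicate indices are merged by summation and the index order becomes sorted and unique. A sketch assuming Paddle >= 2.4, where the op is exposed as paddle.sparse.coalesce (the sample data is illustrative):

    import paddle

    indices = [[0, 0, 1], [1, 1, 2]]  # the entry (0, 1) appears twice
    values = [1.0, 2.0, 3.0]
    sp_x = paddle.sparse.sparse_coo_tensor(indices, values, [2, 3])
    sp_x = paddle.sparse.coalesce(sp_x)
    print(sp_x.indices())  # [[0, 1], [1, 2]]
    print(sp_x.values())   # [3.0, 3.0] -- the two (0, 1) entries are summed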