Unverified · Commit 1431265d authored by zhangkaihuo, committed by GitHub

[Sparse] fix doc (#46967)

Parent 7aa608a5
......@@ -239,17 +239,15 @@ def add(x, y, name=None):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
paddle.device.set_device("cpu")
with _test_eager_guard():
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.add(sparse_x, sparse_y)
print(sparse_z.to_dense())
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.add(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., -1., 0., 0.],
# [ 0., 2., -6., 0.],
......@@ -295,17 +293,15 @@ def subtract(x, y, name=None):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
paddle.device.set_device("cpu")
with _test_eager_guard():
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.subtract(sparse_x, sparse_y)
print(sparse_z.to_dense())
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.subtract(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., -1., 0., 4.],
# [ 0., -2., 0., 0.],
......@@ -340,17 +336,15 @@ def multiply(x, y, name=None):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
paddle.device.set_device("cpu")
with _test_eager_guard():
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.multiply(sparse_x, sparse_y)
print(sparse_z.to_dense())
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.multiply(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., 0., 0., -4.],
# [ 0., 0., 9., 0.],
......@@ -388,17 +382,15 @@ def divide(x, y, name=None):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
paddle.device.set_device("cpu")
with _test_eager_guard():
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.divide(sparse_x, sparse_y)
print(sparse_z.to_dense())
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.sparse.divide(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ nan , -inf. , nan , -1. ],
# [ nan , 0. , 1. , nan ],
......
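Note on the `divide` output above: judging by the printed result, sparse CSR division behaves like plain dense division, so positions where `y` stores no value (an implicit zero) come out as `inf`/`nan`. A minimal sketch (assuming a Paddle build with the 2.4-style `paddle.sparse` namespace) comparing the two:
.. code-block:: python
import paddle
paddle.device.set_device("cpu")
x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32')
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
# Sparse divide on CSR operands ...
sparse_z = paddle.sparse.divide(x.to_sparse_csr(), y.to_sparse_csr())
# ... compared against plain dense division, which yields nan/inf
# wherever y is (implicitly) zero.
print(sparse_z.to_dense())
print(paddle.divide(x, y))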
......@@ -98,18 +98,16 @@ def sparse_coo_tensor(
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
# print(coo)
# Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# indices=[[0, 1, 2],
# [1, 2, 0]],
# values=[1., 2., 3.])
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
# print(coo)
# Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# indices=[[0, 1, 2],
# [1, 2, 0]],
# values=[1., 2., 3.])
"""
if in_dynamic_mode():
......@@ -218,19 +216,17 @@ def sparse_csr_tensor(
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# print(csr)
# Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
# cols=[1, 3, 2, 0, 1],
# values=[1, 2, 3, 4, 5])
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# print(csr)
# Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
# cols=[1, 3, 2, 0, 1],
# values=[1, 2, 3, 4, 5])
"""
place = _get_place(place)
......
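A quick way to make the two layouts concrete is to round-trip through `to_dense()`; a sketch using only the constructors shown above:
.. code-block:: python
import paddle
# COO: nonzeros at (0, 1), (1, 2) and (2, 0) of a 3x3 matrix.
coo = paddle.sparse.sparse_coo_tensor([[0, 1, 2], [1, 2, 0]], [1.0, 2.0, 3.0], [3, 3])
print(coo.to_dense())
# [[0., 1., 0.],
#  [0., 0., 2.],
#  [3., 0., 0.]]
# CSR: row i holds crows[i+1] - crows[i] nonzeros, at the listed cols.
csr = paddle.sparse.sparse_csr_tensor([0, 2, 3, 5], [1, 3, 2, 0, 1], [1, 2, 3, 4, 5], [3, 4])
print(csr.to_dense())
# [[0, 1, 0, 2],
#  [0, 0, 3, 0],
#  [4, 5, 0, 0]]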
......@@ -149,36 +149,17 @@ def conv3d(
* :math:`b`: Bias value, a 1-D tensor with shape [M].
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`
Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`
- Output:
Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Args:
x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
type of input is float16 or float32 or float64.
weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
where M is the number of filters (output channels), g is the number of groups,
kD, kH, kW are the filter's depth, height and width respectively.
bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None.
stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
bias (Tensor, optional): The bias, a Tensor of shape [M].
stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
......@@ -188,11 +169,11 @@ def conv3d(
when `data_format` is `"NDHWC"`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3D Layer. According to grouped
groups (int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
......@@ -201,7 +182,7 @@ def conv3d(
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of:
`[batch_size, input_depth, input_height, input_width, input_channels]`.
name(str|None): For detailed information, please refer
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name does not need to be set and is
None by default.
......@@ -212,19 +193,17 @@ def conv3d(
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
print(y.shape)
# (1, 1, 1, 2, 1)
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
print(y.shape)
# (1, 1, 1, 2, 1)
"""
return _conv3d(
x,
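The output-shape formulas above explain the `(1, 1, 1, 2, 1)` result in this example: the input is `[N=1, D=1, H=3, W=4, C=1]` and the kernel is `(1, 3, 3)`. A tiny helper (hypothetical, not a Paddle API) evaluating one axis of the formula:
.. code-block:: python
def conv3d_out_dim(in_dim, kernel, padding=0, dilation=1, stride=1):
    # (in + 2*pad - (dilation*(k-1) + 1)) / stride + 1, per the formula above
    return (in_dim + 2 * padding - (dilation * (kernel - 1) + 1)) // stride + 1
print([conv3d_out_dim(i, k) for i, k in zip([1, 3, 4], [1, 3, 3])])
# [1, 1, 2]  ->  output shape (1, 1, 1, 2, 1)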
......@@ -277,33 +256,14 @@ def subm_conv3d(
* :math:`b`: Bias value, a 1-D tensor with shape [M].
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`
Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})`
- Output:
Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Args:
x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
type of input is float16 or float32 or float64.
weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
where M is the number of filters (output channels), g is the number of groups,
kD, kH, kW are the filter's depth, height and width respectively.
bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None.
stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
bias (Tensor, optional): The bias, a Tensor of shape [M].
stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
......@@ -316,11 +276,11 @@ def subm_conv3d(
when `data_format` is `"NDHWC"`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3D Layer. According to grouped
groups (int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
......@@ -333,7 +293,7 @@ def subm_conv3d(
the definition and role of rulebook refers to
https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
default value is None.
name(str|None): For detailed information, please refer
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name does not need to be set and is
None by default.
......@@ -345,19 +305,17 @@ def subm_conv3d(
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight)
print(y.shape)
#(1, 1, 3, 4, 1)
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight)
print(y.shape)
#(1, 1, 3, 4, 1)
"""
return _conv3d(
x,
......
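The key contrast with `conv3d`: submanifold convolution only computes outputs at the input's active sites, so the spatial shape (and sparsity pattern) is preserved, which is why this example prints `(1, 1, 3, 4, 1)` while the ordinary convolution above shrank to `(1, 1, 1, 2, 1)`. A side-by-side sketch on the same tensors as the examples:
.. code-block:: python
import paddle
indices = paddle.to_tensor([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]], dtype='int32')
values = paddle.to_tensor([[1], [2], [3], [4]], dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, [1, 1, 3, 4, 1])
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
# Ordinary sparse convolution shrinks the spatial extent ...
print(paddle.sparse.nn.functional.conv3d(sparse_x, weight).shape)      # [1, 1, 1, 2, 1]
# ... submanifold convolution keeps the active sites (and shape) fixed.
print(paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight).shape) # [1, 1, 3, 4, 1]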
......@@ -39,18 +39,18 @@ def max_pool3d(
is a tuple or list, it must contain three integers,
(kernel_size_Depth, kernel_size_Height, kernel_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
stride (int|list|tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain three integers, (stride_Depth, stride_Height, stride_Width).
Otherwise, the pool stride size will be a cube of an int.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
padding (string|int|list|tuple, optional): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
2. An int, which means the feature map is zero-padded by size of `padding` on each side.
3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
ceil_mode (bool): ${ceil_mode_comment}
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
ceil_mode (bool, optional): ${ceil_mode_comment}
data_format (string, optional): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`. Currently only `"NDHWC"` is supported.
name(str, optional): For detailed information, please refer
......@@ -64,16 +64,14 @@ def max_pool3d(
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.randn((1, 4, 4, 4, 3))
sparse_x = dense_x.to_sparse_coo(4)
kernel_sizes = [3, 3, 3]
paddings = [0, 0, 0]
strides = [1, 1, 1]
out = paddle.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
#[1, 2, 2, 2, 3]
dense_x = paddle.randn((1, 4, 4, 4, 3))
sparse_x = dense_x.to_sparse_coo(4)
kernel_sizes = [3, 3, 3]
paddings = [0, 0, 0]
strides = [1, 1, 1]
out = paddle.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
#[1, 2, 2, 2, 3]
"""
assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
......
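The `[1, 2, 2, 2, 3]` result follows from the usual pooling arithmetic: with kernel 3, stride 1 and no padding, each spatial axis shrinks from 4 to (4 - 3)/1 + 1 = 2. Since a random dense input almost surely contains no exact zeros, `to_sparse_coo(4)` keeps every entry, so the sparse result should match dense pooling; a sketch, assuming the dense `paddle.nn.functional.max_pool3d` accepts `data_format='NDHWC'`:
.. code-block:: python
import paddle
dense_x = paddle.randn((1, 4, 4, 4, 3))
sparse_out = paddle.sparse.nn.functional.max_pool3d(
    dense_x.to_sparse_coo(4), kernel_size=3, stride=1, padding=0)
dense_out = paddle.nn.functional.max_pool3d(
    dense_x, kernel_size=3, stride=1, padding=0, data_format='NDHWC')
print(paddle.allclose(sparse_out.to_dense(), dense_out))  # expected: True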
......@@ -221,19 +221,17 @@ class Conv3D(_Conv3D):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
y = conv(sparse_x)
print(y.shape)
# (1, 1, 1, 2, 1)
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
y = conv(sparse_x)
print(y.shape)
# (1, 1, 1, 2, 1)
"""
def __init__(
......@@ -269,8 +267,8 @@ class Conv3D(_Conv3D):
class SubmConv3D(_Conv3D):
r"""
**Sparse Submanifold Convolution3d Layer**
The Sparse submanifold convolution3d layer calculates the output based on the input, filter
**Submanifold Sparse Convolution3d Layer**
The submanifold sparse convolution3d layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional SparseCooTensors with a shape of
:math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
......@@ -281,7 +279,7 @@ class SubmConv3D(_Conv3D):
.. math::
Out =(W \ast X + b
Out = W \ast X + b
In the above equation:
......@@ -362,19 +360,17 @@ class SubmConv3D(_Conv3D):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
dense_shape = [1, 1, 3, 4, 1]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
y = subm_conv(sparse_x)
print(y.shape)
# (1, 1, 3, 4, 1)
indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
values = [[1], [2], [3], [4]]
dense_shape = [1, 1, 3, 4, 1]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
y = subm_conv(sparse_x)
print(y.shape)
# (1, 1, 3, 4, 1)
"""
def __init__(
......
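The functional docs above describe the kernel layout as `[kD, kH, kW, C/g, M]`, and these layer classes hold such a kernel internally. A sketch (assuming the parameter is exposed as `.weight`, as in Paddle's dense conv layers):
.. code-block:: python
import paddle
conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
subm = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
print(conv.weight.shape)  # expected [1, 3, 3, 1, 1], i.e. [kD, kH, kW, C/g, M]
print(subm.weight.shape)  # expected [1, 3, 3, 1, 1]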
......@@ -83,18 +83,16 @@ class BatchNorm(paddle.nn.BatchNorm1D):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
paddle.seed(123)
channels = 3
x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
dense_x = paddle.to_tensor(x_data)
sparse_x = dense_x.to_sparse_coo(4)
batch_norm = paddle.sparse.nn.BatchNorm(channels)
batch_norm_out = batch_norm(sparse_x)
print(batch_norm_out.shape)
# [1, 6, 6, 6, 3]
paddle.seed(123)
channels = 3
x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
dense_x = paddle.to_tensor(x_data)
sparse_x = dense_x.to_sparse_coo(4)
batch_norm = paddle.sparse.nn.BatchNorm(channels)
batch_norm_out = batch_norm(sparse_x)
print(batch_norm_out.shape)
# [1, 6, 6, 6, 3]
"""
def __init__(
......@@ -270,6 +268,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. If it is set to False, this layer will not
have trainable bias parameter. Default: None.
data_format(str, optional): Specify the input data format, which may only be "NCHW". Default: "NCHW".
name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.
Shapes:
input: A Tensor whose dimension is from 2 to 5.
......@@ -282,10 +282,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
# required: gpu
import paddle
import paddle.sparse.nn as nn
import numpy as np
x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
x = paddle.to_tensor(x)
x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]], dtype='float32')
x = x.to_sparse_coo(len(x.shape)-1)
if paddle.is_compiled_with_cuda():
......
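The example above is truncated by the diff view; a hedged sketch of how such a snippet typically continues (requires a CUDA build; `x` has 2 channels after `to_sparse_coo(3)`):
.. code-block:: python
# continuation sketch -- x is the sparse 2-channel tensor built above
if paddle.is_compiled_with_cuda():
    sync_batch_norm = nn.SyncBatchNorm(2)
    hidden1 = sync_batch_norm(x)
    print(hidden1)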
......@@ -61,15 +61,13 @@ class MaxPool3D(Layer):
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC')
out = max_pool3d(sparse_x)
#shape=[2, 1, 2, 2, 3]
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC')
out = max_pool3d(sparse_x)
#shape=[2, 1, 2, 2, 3]
"""
......
......@@ -521,12 +521,14 @@ def abs(x, name=None):
@dygraph_only
def coalesce(x):
def coalesce(x, name=None):
r"""
The coalesce operator sorts the stored indices and merges duplicated entries; after coalescing, the indices of x are sorted and unique.
Parameters:
x (Tensor): the input SparseCooTensor.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: the coalesced SparseCooTensor.
......
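A sketch of the behavior (assuming `paddle.sparse.coalesce` is the public entry point and that duplicated values are summed, as is conventional for COO coalescing):
.. code-block:: python
import paddle
# two values stored at the same coordinate (1, 2), plus one at (0, 1)
sp_x = paddle.sparse.sparse_coo_tensor([[0, 1, 1], [1, 2, 2]], [1.0, 2.0, 3.0], [3, 3])
sp_x = paddle.sparse.coalesce(sp_x)
print(sp_x.indices())  # duplicates merged: [[0, 1], [1, 2]]
print(sp_x.values())   # summed at the merged site: [1., 5.]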