From 61953b90517e99c97f1e8c4dbaf81aa46d0c38ea Mon Sep 17 00:00:00 2001 From: zhangkaihuo Date: Tue, 1 Nov 2022 17:08:41 +0800 Subject: [PATCH] [cherry-pick]Fix english documents of sparse api (#47496) Fix english documents of sparse api --- python/paddle/sparse/binary.py | 123 ++++--- python/paddle/sparse/creation.py | 180 +++++----- python/paddle/sparse/nn/functional/conv.py | 307 +++++++++--------- python/paddle/sparse/nn/functional/pooling.py | 60 ++-- python/paddle/sparse/nn/layer/conv.py | 269 ++++++++------- python/paddle/sparse/nn/layer/norm.py | 28 +- python/paddle/sparse/nn/layer/pooling.py | 51 +-- python/paddle/sparse/unary.py | 74 +++-- 8 files changed, 558 insertions(+), 534 deletions(-) diff --git a/python/paddle/sparse/binary.py b/python/paddle/sparse/binary.py index b65e30afd1c..3d2a3af8ec8 100644 --- a/python/paddle/sparse/binary.py +++ b/python/paddle/sparse/binary.py @@ -33,13 +33,13 @@ _int_dtype_ = [ @dygraph_only def matmul(x, y, name=None): """ - Note: + Note: This API is only supported from ``CUDA 11.0`` . - Applies matrix multiplication of two Tensors. - + Applies matrix multiplication of two Tensors. + The supported input/output Tensor layout are as follows: - + Note: x[SparseCsrTensor] @ y[SparseCsrTensor] -> out[SparseCsrTensor] x[SparseCsrTensor] @ y[DenseTensor] -> out[DenseTensor] @@ -49,14 +49,14 @@ def matmul(x, y, name=None): It supports backward propagation. Dimensions `x` and `y` must be >= 2D. Automatic broadcasting of Tensor is not supported. - the shape of `x` should be `[*, M, K]` , and the shape of `y` should be `[*, K, N]` , where `*` + the shape of `x` should be `[*, M, K]` , and the shape of `y` should be `[*, K, N]` , where `*` is zero or more batch dimensions. Args: x (Tensor): The input tensor. It can be SparseCooTensor/SparseCsrTensor. The data type can be float32 or float64. y (Tensor): The input tensor. It can be SparseCooTensor/SparseCsrTensor/DenseTensor. The data type can be float32 or float64. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - + Returns: Tensor: Its layout is determined by that of `x` and `y` . @@ -72,9 +72,9 @@ def matmul(x, y, name=None): cols = [1, 2, 0] values = [1., 2., 3.] csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3]) - # Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, - # crows=[0, 1, 2, 3], - # cols=[1, 2, 0], + # Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, + # crows=[0, 1, 2, 3], + # cols=[1, 2, 0], # values=[1., 2., 3.]) dense = paddle.ones([3, 2]) out = paddle.sparse.matmul(csr, dense) @@ -87,9 +87,9 @@ def matmul(x, y, name=None): indices = [[0, 1, 2], [1, 2, 0]] values = [1., 2., 3.] coo = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3]) - # Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, + # Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, # indices=[[0, 1, 2], - # [1, 2, 0]], + # [1, 2, 0]], # values=[1., 2., 3.]) dense = paddle.ones([3, 2]) out = paddle.sparse.matmul(coo, dense) @@ -104,13 +104,13 @@ def matmul(x, y, name=None): @dygraph_only def masked_matmul(x, y, mask, name=None): """ - Note: + Note: This API is only supported from ``CUDA 11.3`` . - Applies matrix multiplication of two Dense Tensors. - + Applies matrix multiplication of two Dense Tensors. 
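
    As a minimal sketch of the semantics (assuming a GPU build with CUDA 11.3 or
    later), the masked product equals the dense product sampled at the positions
    where ``mask`` is non-zero:

    .. code-block:: python

        # required: gpu
        import paddle
        paddle.seed(100)

        # a hypothetical 0/1 mask, converted to CSR to supply the sparsity pattern
        mask = paddle.randint(0, 2, [3, 4]).astype('float32').to_sparse_csr()
        x = paddle.rand([3, 5])
        y = paddle.rand([5, 4])

        out = paddle.sparse.masked_matmul(x, y, mask)
        # dense reference: (x @ y) kept only where the mask is non-zero
        ref = paddle.matmul(x, y) * mask.to_dense()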
+ The supported input/output Tensor layout are as follows: - + Note: x[DenseTensor] @ y[DenseTensor] * mask[SparseCooTensor] -> out[SparseCooTensor] x[DenseTensor] @ y[DenseTensor] * mask[SparseCsrTensor] -> out[SparseCsrTensor] @@ -153,9 +153,9 @@ def masked_matmul(x, y, mask, name=None): y = paddle.rand([5, 4]) out = paddle.sparse.masked_matmul(x, y, mask) - # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, - # crows=[0, 2, 3, 5], - # cols=[1, 3, 2, 0, 1], + # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, + # crows=[0, 2, 3, 5], + # cols=[1, 3, 2, 0, 1], # values=[0.98986477, 0.97800624, 1.14591956, 0.68561077, 0.94714981]) """ @@ -165,11 +165,11 @@ def masked_matmul(x, y, mask, name=None): @dygraph_only def mv(x, vec, name=None): """ - Note: + Note: This API is only supported from ``CUDA 11.0`` . - Applies matrix-vector product of Sparse Matrix 'x' and Dense vector 'vec' . - + Applies matrix-vector product of Sparse Matrix 'x' and Dense vector 'vec' . + The supported input/output Tensor layout are as follows: Note: @@ -178,39 +178,39 @@ def mv(x, vec, name=None): It supports backward propagation. - The shape of `x` should be `[M, N]` , and the shape of `y` should be `[N]` , + The shape of `x` should be `[M, N]` , and the shape of `y` should be `[N]` , and the shape of `out` will be `[M]` . Args: x (Tensor): The input 2D tensor. It must be SparseCooTensor/SparseCsrTensor. The data type can be float32 or float64. y (Tensor): The input 1D tensor. It must be DenseTensor vector. The data type can be float32 or float64. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - + Returns: Tensor: 1D Tensor. Examples: .. code-block:: python - + # required: gpu import paddle - from paddle.fluid.framework import _test_eager_guard + from paddle.fluid.framework import _test_eager_guard paddle.seed(100) # csr @ dense -> dense - with _test_eager_guard(): + with _test_eager_guard(): crows = [0, 2, 3, 5] cols = [1, 3, 2, 0, 1] values = [1., 2., 3., 4., 5.] dense_shape = [3, 4] csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape) - # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, - # crows=[0, 2, 3, 5], - # cols=[1, 3, 2, 0, 1], + # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, + # crows=[0, 2, 3, 5], + # cols=[1, 3, 2, 0, 1], # values=[1., 2., 3., 4., 5.]) vec = paddle.randn([4]) - + out = paddle.sparse.mv(csr, vec) # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True, # [-3.85499096, -2.42975140, -1.75087738]) @@ -241,17 +241,15 @@ def add(x, y, name=None): .. 
code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard paddle.device.set_device("cpu") - with _test_eager_guard(): - x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') - y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') - sparse_x = x.to_sparse_csr() - sparse_y = y.to_sparse_csr() - sparse_z = paddle.sparse.add(sparse_x, sparse_y) - print(sparse_z.to_dense()) + x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') + y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') + sparse_x = x.to_sparse_csr() + sparse_y = y.to_sparse_csr() + sparse_z = paddle.sparse.add(sparse_x, sparse_y) + print(sparse_z.to_dense()) # [[ 0., -1., 0., 0.], # [ 0., 2., -6., 0.], @@ -268,10 +266,9 @@ def add(x, y, name=None): inputs = {'x': x, 'y': y} helper = LayerHelper(op_type) out = helper.create_sparse_variable_for_type_inference(x.dtype) - helper.append_op(type=op_type, - inputs=inputs, - outputs={'out': out}, - attrs={}) + helper.append_op( + type=op_type, inputs=inputs, outputs={'out': out}, attrs={} + ) return out @@ -298,17 +295,15 @@ def subtract(x, y, name=None): .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard paddle.device.set_device("cpu") - with _test_eager_guard(): - x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') - y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') - sparse_x = x.to_sparse_csr() - sparse_y = y.to_sparse_csr() - sparse_z = paddle.sparse.subtract(sparse_x, sparse_y) - print(sparse_z.to_dense()) + x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') + y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') + sparse_x = x.to_sparse_csr() + sparse_y = y.to_sparse_csr() + sparse_z = paddle.sparse.subtract(sparse_x, sparse_y) + print(sparse_z.to_dense()) # [[ 0., -1., 0., 4.], # [ 0., -2., 0., 0.], @@ -343,17 +338,15 @@ def multiply(x, y, name=None): .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard paddle.device.set_device("cpu") - with _test_eager_guard(): - x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') - y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') - sparse_x = x.to_sparse_csr() - sparse_y = y.to_sparse_csr() - sparse_z = paddle.sparse.multiply(sparse_x, sparse_y) - print(sparse_z.to_dense()) + x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') + y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') + sparse_x = x.to_sparse_csr() + sparse_y = y.to_sparse_csr() + sparse_z = paddle.sparse.multiply(sparse_x, sparse_y) + print(sparse_z.to_dense()) # [[ 0., 0., 0., -4.], # [ 0., 0., 9., 0.], @@ -391,17 +384,15 @@ def divide(x, y, name=None): .. 
code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard paddle.device.set_device("cpu") - with _test_eager_guard(): - x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') - y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') - sparse_x = x.to_sparse_csr() - sparse_y = y.to_sparse_csr() - sparse_z = paddle.sparse.divide(sparse_x, sparse_y) - print(sparse_z.to_dense()) + x = paddle.to_tensor([[0, -1, 0, 2], [0, 0, -3, 0], [4, 5, 0, 0]], 'float32') + y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32') + sparse_x = x.to_sparse_csr() + sparse_y = y.to_sparse_csr() + sparse_z = paddle.sparse.divide(sparse_x, sparse_y) + print(sparse_z.to_dense()) # [[ nan , -inf. , nan , -1. ], # [ nan , 0. , 1. , nan ], diff --git a/python/paddle/sparse/creation.py b/python/paddle/sparse/creation.py index 844ed1bddf3..1237dff3522 100644 --- a/python/paddle/sparse/creation.py +++ b/python/paddle/sparse/creation.py @@ -17,7 +17,12 @@ from paddle import _C_ops, _legacy_C_ops from paddle.fluid.framework import core, dygraph_only from paddle.fluid.framework import _current_expected_place, _get_paddle_place from paddle.tensor import to_tensor, max -from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from paddle.fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + convert_dtype, +) from paddle import in_dynamic_mode from paddle.fluid.layer_helper import LayerHelper @@ -51,8 +56,8 @@ def _get_place(place): if place is None: place = _current_expected_place() elif not isinstance( - place, - (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)): + place, (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace) + ): raise ValueError( "'place' must be any of paddle.Place, paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace" ) @@ -66,14 +71,11 @@ def _check_indices_dtype(dtype): ) -def sparse_coo_tensor(indices, - values, - shape=None, - dtype=None, - place=None, - stop_gradient=True): +def sparse_coo_tensor( + indices, values, shape=None, dtype=None, place=None, stop_gradient=True +): r""" - Constructs a sparse ``paddle.Tensor`` in coordinate format according to the indices + Constructs a sparse ``paddle.Tensor`` in coordinate format according to the indices and values of the specified non-zero elements. Args: @@ -82,15 +84,15 @@ def sparse_coo_tensor(indices, values(list|tuple|ndarray|Tensor): Initial values for the tensor. Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor. shape(list|tuple, optional): The shape of the sparse tensor also represents the shape of - original dense tensor. If not provided the smallest shape will be inferred to + original dense tensor. If not provided the smallest shape will be inferred to hold all elements. - dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' , + dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' , 'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8', - 'complex64' , 'complex128'. Default: None, infers dtype from ``data`` + 'complex64' , 'complex128'. Default: None, infers dtype from ``data`` except for python float number which gets dtype from ``get_default_type`` . - place(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be - CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. 
If ``place`` is - string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs. + place(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be + CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``place`` is + string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs. stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True. Returns: @@ -98,37 +100,34 @@ def sparse_coo_tensor(indices, Raises: TypeError: If the data type of ``values`` is not list, tuple, numpy.ndarray, paddle.Tensor - ValueError: If ``values`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]. If the ``indices`` is not a 2-D. + ValueError: If ``values`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]. If the ``indices`` is not a 2-D. TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128 - ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string. + ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string. Examples: .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - indices = [[0, 1, 2], [1, 2, 0]] - values = [1.0, 2.0, 3.0] - dense_shape = [3, 3] - coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape) - # print(coo) - # Tensor(shape=[2, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, - # indices=[[0, 1, 2], - # [1, 2, 0]], - # values=[1., 2., 3.]) + + indices = [[0, 1, 2], [1, 2, 0]] + values = [1.0, 2.0, 3.0] + dense_shape = [3, 3] + coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape) + # print(coo) + # Tensor(shape=[2, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True, + # indices=[[0, 1, 2], + # [1, 2, 0]], + # values=[1., 2., 3.]) """ if in_dynamic_mode(): place = _get_place(place) if not isinstance(indices, core.eager.Tensor): - indices = to_tensor(indices, - dtype=None, - place=place, - stop_gradient=True) + indices = to_tensor( + indices, dtype=None, place=place, stop_gradient=True + ) if not isinstance(values, core.eager.Tensor): values = to_tensor(values, dtype, place, stop_gradient) if len(indices.shape) != 2: @@ -141,8 +140,10 @@ def sparse_coo_tensor(indices, if nnz != values.shape[0]: raise ValueError( - "the indices and values must have same number of non-zero, but get {} and {}" - .format(nnz, values.shape[0])) + "the indices and values must have same number of non-zero, but get {} and {}".format( + nnz, values.shape[0] + ) + ) dense_dim = len(values.shape) - 1 @@ -162,11 +163,15 @@ def sparse_coo_tensor(indices, if shape < min_shape: raise ValueError( "the minimun shape required is {}, but get {}".format( - min_shape, shape)) + min_shape, shape + ) + ) if len(shape) != sparse_dim + dense_dim: raise ValueError( - "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}" - .format(sparse_dim, dense_dim, len(shape))) + "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}".format( + sparse_dim, dense_dim, len(shape) + ) + ) return _C_ops.sparse_sparse_coo_tensor(values, indices, shape) @@ -178,45 +183,40 @@ def sparse_coo_tensor(indices, attrs = 
{'dense_shape': shape}
     helper = LayerHelper(op_type)
     out = helper.create_sparse_variable_for_type_inference(dtype)
-    helper.append_op(type=op_type,
-                     inputs=inputs,
-                     outputs={'out': out},
-                     attrs=attrs)
+    helper.append_op(
+        type=op_type, inputs=inputs, outputs={'out': out}, attrs=attrs
+    )
     return out
 
 
-#TODO: need to support shape is None
+# TODO: need to support shape is None
 @dygraph_only
-def sparse_csr_tensor(crows,
-                      cols,
-                      values,
-                      shape,
-                      dtype=None,
-                      place=None,
-                      stop_gradient=True):
+def sparse_csr_tensor(
+    crows, cols, values, shape, dtype=None, place=None, stop_gradient=True
+):
     r"""
-    Constructs a sparse ``paddle.Tensor`` in CSR(Compressed Sparse Row) format according to the
+    Constructs a sparse ``paddle.Tensor`` in CSR (Compressed Sparse Row) format according to the
     ``crows``, ``cols`` and ``values``.
     Currently, the crows and cols of each batch must be incremented.
 
     Args:
-        crows(list|tuple|ndarray|Tensor): 1-D array, each element in the rows represents the
-            starting position of the first non-zero element of each row in values.
-            Can be a list, tuple, numpy\.ndarray, paddle\.Tensor.
+        crows(list|tuple|ndarray|Tensor): 1-D array, each element in the rows represents the
+            starting position of the first non-zero element of each row in values.
+            Can be a list, tuple, numpy\.ndarray, paddle\.Tensor.
         cols(list|tuple|ndarray|Tensor): 1-D array, the column of non-zero elements.
-            Can be a list, tuple, numpy\.ndarray, paddle\.Tensor.
+            Can be a list, tuple, numpy\.ndarray, paddle\.Tensor.
         values(list|tuple|ndarray|Tensor): 1-D array, the non-zero elements.
             Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
         shape(list|tuple, optional): The shape of the sparse tensor also represents the shape of
-            original dense tensor.
+            the original dense tensor.
-        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' ,
+        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' ,
             'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8',
-            'complex64' , 'complex128'. Default: None, infers dtype from ``data``
+            'complex64' , 'complex128'. Default: None, infers dtype from ``data``
            except for python float number which gets dtype from ``get_default_type`` .
-        place(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be
-            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``place`` is
-            string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs.
+        place(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be
+            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``place`` is
+            a string, it can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs.
         stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True.
 
     Returns:
@@ -224,28 +224,26 @@ def sparse_csr_tensor(crows,
 
     Raises:
         TypeError: If the data type of ``values`` is not list, tuple, numpy.ndarray, paddle.Tensor
-        ValueError: If ``values`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]. If the ``crow``, ``cols`` and ``values`` is not a 2-D.
+        ValueError: If ``values`` is tuple|list, it can't contain nested tuple|list with different lengths, such as: [[1, 2], [3, 4, 5]], or if the ``crows``, ``cols`` or ``values`` is not 1-D.
TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128 - ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string. + ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string. Examples: .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - crows = [0, 2, 3, 5] - cols = [1, 3, 2, 0, 1] - values = [1, 2, 3, 4, 5] - dense_shape = [3, 4] - csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape) - # print(csr) - # Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True, - # crows=[0, 2, 3, 5], - # cols=[1, 3, 2, 0, 1], - # values=[1, 2, 3, 4, 5]) + + crows = [0, 2, 3, 5] + cols = [1, 3, 2, 0, 1] + values = [1, 2, 3, 4, 5] + dense_shape = [3, 4] + csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape) + # print(csr) + # Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True, + # crows=[0, 2, 3, 5], + # cols=[1, 3, 2, 0, 1], + # values=[1, 2, 3, 4, 5]) """ place = _get_place(place) @@ -262,8 +260,10 @@ def sparse_csr_tensor(crows, if len(shape) != 2 and len(shape) != 3: raise ValueError( - "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}". - format(shape)) + "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}".format( + shape + ) + ) rows = shape[len(shape) - 2] if not crows.place._equals(place): @@ -280,26 +280,32 @@ def sparse_csr_tensor(crows, if len(crows.shape) != 1 or len(cols.shape) != 1 or len(values.shape) != 1: raise ValueError("The 'crows', 'cols' and 'values' must be 1-D.") - if (len(cols) != len(values)): + if len(cols) != len(values): raise ValueError("the length of cols must be same as length of values") if len(shape) == 2: if crows.shape[0] != rows + 1: raise ValueError( - "The length({}) of crows must be equal to the rows({})+1 of matrix." - .format(crows.shape[0], rows)) + "The length({}) of crows must be equal to the rows({})+1 of matrix.".format( + crows.shape[0], rows + ) + ) if crows[0] != 0: raise ValueError("the 0th value of crows must be 0") if crows[-1] != values.shape[0]: raise ValueError( - "the last value of crows must be equal the number of non-zero") + "the last value of crows must be equal the number of non-zero" + ) else: if crows.shape[0] % (rows + 1) != 0: raise ValueError( - "The length({}) of crows must be divisible the rows({})+1 of matrix." 
- .format(crows.shape[0], rows)) + "The length({}) of crows must be divisible the rows({})+1 of matrix.".format( + crows.shape[0], rows + ) + ) # TODO(zkh2016): check whether the value in crows and cols is legal - return core.eager.sparse_csr_tensor(crows, cols, values, shape, - stop_gradient) + return core.eager.sparse_csr_tensor( + crows, cols, values, shape, stop_gradient + ) diff --git a/python/paddle/sparse/nn/functional/conv.py b/python/paddle/sparse/nn/functional/conv.py index c1e1688eecf..f9d16f46839 100644 --- a/python/paddle/sparse/nn/functional/conv.py +++ b/python/paddle/sparse/nn/functional/conv.py @@ -23,50 +23,66 @@ from paddle.nn.functional.conv import _update_padding_nd from paddle.fluid.layer_helper import LayerHelper -def _conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - subm=False, - key=None, - data_format="NDHWC", - name=None): +def _conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + subm=False, + key=None, + data_format="NDHWC", + name=None, +): assert groups == 1, "Currently, only support groups=1" dims = 3 # Currently, only support 'NDHWC' if data_format not in ["NDHWC"]: - raise ValueError("Attr(data_format) should be 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format)) + raise ValueError( + "Attr(data_format) should be 'NDHWC'. Received " + "Attr(data_format): {}.".format(data_format) + ) if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( "The channel dimension of the input({}) should be defined. 
" - "Received: {}.".format(x.shape, num_channels)) + "Received: {}.".format(x.shape, num_channels) + ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims) stride = convert_to_list(stride, dims, 'stride') dilation = convert_to_list(dilation, dims, 'dilation') if in_dynamic_mode(): - pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride, - groups, subm, - key if key is not None else "") + pre_bias = _C_ops.sparse_conv3d( + x, + weight, + padding, + dilation, + stride, + groups, + subm, + key if key is not None else "", + ) if bias is not None: return add(pre_bias, bias) else: @@ -79,44 +95,47 @@ def _conv3d(x, 'strides': stride, 'groups': groups, 'subm': subm, - 'key': key + 'key': key, } op_type = 'sparse_conv3d' helper = LayerHelper(op_type, **locals()) - rulebook = helper.create_variable_for_type_inference(dtype='int32', - stop_gradient=True) - counter = helper.create_variable_for_type_inference(dtype='int32', - stop_gradient=True) + rulebook = helper.create_variable_for_type_inference( + dtype='int32', stop_gradient=True + ) + counter = helper.create_variable_for_type_inference( + dtype='int32', stop_gradient=True + ) pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype) outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: return add(pre_bias, bias) else: return pre_bias -def conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NDHWC", - name=None): +def conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NDHWC", + name=None, +): r""" The sparse convolution3d functional calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and - Output(Output) are multidimensional SparseCooTensors with a shape of + Output(Output) are multidimensional SparseCooTensors with a shape of :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, - and W is the width of the feature. If bias attribution is provided, - bias is added to the output of the convolution. + and W is the width of the feature. If bias attribution is provided, + bias is added to the output of the convolution. For each input :math:`X`, the equation is: @@ -132,36 +151,17 @@ def conv3d(x, * :math:`b`: Bias value, a 1-D tensor with shape [M]. * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. - Example: - - - Input: - - Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})` - - Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})` - - - Output: - Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})` - - Where - - .. math:: - - D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\ - H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\ - W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1 - Args: - x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data + x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data type of input is float16 or float32 or float64. 
weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M], where M is the number of filters(output channels), g is the number of groups, kD, kH, kW are the filter's depth, height and width respectively. - bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None. - stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a - list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width). + bias (Tensor, optional): The bias, a Tensor of shape [M]. + stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a + list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1. - padding (string|int|list|tuple): The padding size. It means the number of zero-paddings + padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings on both sides for each dimension. If `padding` is a string, either 'VALID' or 'SAME' which is the padding algorithm. If padding size is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or @@ -171,67 +171,78 @@ def conv3d(x, when `data_format` is `"NDHWC"`, `padding` can be in the form `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. Default: padding = 0. - dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points. + dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points. If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height, - dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. + dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. Default: dilation = 1. - groups (int): The groups number of the Conv3D Layer. According to grouped + groups (int, optional): The groups number of the Conv3D Layer. According to grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups=1. Currently, only support groups=1. - data_format (str, optional): Specify the data format of the input, and the data format of the output + data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`. The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of: `[batch_size, input_depth, input_height, input_width, input_channels]`. - name(str|None): For detailed information, please refer - to :ref:`api_guide_Name`. Usually name is no need to set and + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: - A SparseCooTensor representing the conv3d, whose data type is the same with input. + A SparseCooTensor representing the conv3d, whose data type is the same with input. Examples: .. 
code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] - values = [[1], [2], [3], [4]] - indices = paddle.to_tensor(indices, dtype='int32') - values = paddle.to_tensor(values, dtype='float32') - dense_shape = [1, 1, 3, 4, 1] - sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) - weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32') - y = paddle.sparse.nn.functional.conv3d(sparse_x, weight) - print(y.shape) - # (1, 1, 1, 2, 1) + + indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] + values = [[1], [2], [3], [4]] + indices = paddle.to_tensor(indices, dtype='int32') + values = paddle.to_tensor(values, dtype='float32') + dense_shape = [1, 1, 3, 4, 1] + sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) + weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32') + y = paddle.sparse.nn.functional.conv3d(sparse_x, weight) + print(y.shape) + # (1, 1, 1, 2, 1) """ - return _conv3d(x, weight, bias, stride, padding, dilation, groups, False, - None, data_format, name) - - -def subm_conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NDHWC", - key=None, - name=None): + return _conv3d( + x, + weight, + bias, + stride, + padding, + dilation, + groups, + False, + None, + data_format, + name, + ) + + +def subm_conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NDHWC", + key=None, + name=None, +): r""" The sparse submanifold convolution3d functional calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and - Output(Output) are multidimensional SparseCooTensors with a shape of + Output(Output) are multidimensional SparseCooTensors with a shape of :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, - and W is the width of the feature. If bias attribution is provided, - bias is added to the output of the convolution. + and W is the width of the feature. If bias attribution is provided, + bias is added to the output of the convolution. For each input :math:`X`, the equation is: @@ -247,36 +258,17 @@ def subm_conv3d(x, * :math:`b`: Bias value, a 1-D tensor with shape [M]. * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. - Example: - - - Input: - - Input shape: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})` - - Filter shape: :math:`(D_f, H_f, W_f, C_{in}, C_{out})` - - - Output: - Output shape: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})` - - Where - - .. math:: - - D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\ - H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\ - W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1 - Args: - x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data + x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data type of input is float16 or float32 or float64. weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M], where M is the number of filters(output channels), g is the number of groups, kD, kH, kW are the filter's depth, height and width respectively. 
- bias (Tensor, optional): The bias, a Tensor of shape [M, ], currently, only support bias is None. - stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a - list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width). + bias (Tensor, optional): The bias, a Tensor of shape [M]. + stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a + list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1. - padding (string|int|list|tuple): The padding size. It means the number of zero-paddings + padding (string|int|list|tuple): The padding size. It means the number of zero-paddings on both sides for each dimension. If `padding` is a string, either 'VALID' or 'SAME' which is the padding algorithm. If padding size is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or @@ -286,48 +278,57 @@ def subm_conv3d(x, when `data_format` is `"NHWC"`, `padding` can be in the form `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. Default: padding = 0. - dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points. + dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points. If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height, - dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. + dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. Default: dilation = 1. - groups (int): The groups number of the Conv3D Layer. According to grouped + groups (int, optional): The groups number of the Conv3D Layer. According to grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Currently, only support groups=1. - data_format (str, optional): Specify the data format of the input, and the data format of the output + data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`. The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of: `[batch_size, input_depth, input_height, input_width, input_channels]`. - key(str, optional): the key is used to save or use the same rulebook, + key(str, optional): the key is used to save or use the same rulebook, the definition and role of rulebook refers to - https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The + https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The default value is None. - name(str|None): For detailed information, please refer - to :ref:`api_guide_Name`. Usually name is no need to set and + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: - A SparseCooTensor representing the conv3d, whose data type is - the same with input. + A SparseCooTensor representing the conv3d, whose data type is + the same with input. Examples: .. 
code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] - values = [[1], [2], [3], [4]] - indices = paddle.to_tensor(indices, dtype='int32') - values = paddle.to_tensor(values, dtype='float32') - dense_shape = [1, 1, 3, 4, 1] - sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) - weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32') - y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight) - print(y.shape) - #(1, 1, 3, 4, 1) + + indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] + values = [[1], [2], [3], [4]] + indices = paddle.to_tensor(indices, dtype='int32') + values = paddle.to_tensor(values, dtype='float32') + dense_shape = [1, 1, 3, 4, 1] + sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) + weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32') + y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight) + print(y.shape) + #(1, 1, 3, 4, 1) """ - return _conv3d(x, weight, bias, stride, padding, dilation, groups, True, - key, data_format, name) + return _conv3d( + x, + weight, + bias, + stride, + padding, + dilation, + groups, + True, + key, + data_format, + name, + ) diff --git a/python/paddle/sparse/nn/functional/pooling.py b/python/paddle/sparse/nn/functional/pooling.py index 6ff39014610..615a27d3df9 100644 --- a/python/paddle/sparse/nn/functional/pooling.py +++ b/python/paddle/sparse/nn/functional/pooling.py @@ -19,13 +19,15 @@ from paddle.nn.functional.pooling import _update_padding_nd __all__ = [] -def max_pool3d(x, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - data_format="NDHWC", - name=None): +def max_pool3d( + x, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + data_format="NDHWC", + name=None, +): """ Implements sparse max pooling 3d operation. See more details in :ref:`api_sparse_pooling_MaxPool3d` . @@ -37,47 +39,48 @@ def max_pool3d(x, is a tuple or list, it must contain three integers, (kernel_size_Depth, kernel_size_Height, kernel_size_Width). Otherwise, the pool kernel size will be the cube of an int. - stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list, + stride (int|list|tuple, optional): The pool stride size. If pool stride size is a tuple or list, it must contain three integers, [stride_Depth, stride_Height, stride_Width). Otherwise, the pool stride size will be a cube of an int. - padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms. + padding (string|int|list|tuple, optional): The padding size. Padding could be in one of the following forms. 1. A string in ['valid', 'same']. 2. An int, which means the feature map is zero padded by size of `padding` on every sides. 3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_weight] whose value means the padding size of each dimension. 4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side. 5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0). The default value is 0. 
- ceil_mode (bool): ${ceil_mode_comment} - data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`. + ceil_mode (bool, optional): ${ceil_mode_comment} + data_format (string, optional): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`. The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. Currently only support `"NDHWC"` . name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - + Returns: Tensor: The output tensor of pooling result. The data type is same as input tensor. - + Examples: .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - with _test_eager_guard(): - dense_x = paddle.randn((1, 4, 4, 4, 3)) - sparse_x = dense_x.to_sparse_coo(4) - kernel_sizes = [3, 3, 3] - paddings = [0, 0, 0] - strides = [1, 1, 1] - out = paddle.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings) - #[1, 2, 2, 2, 3] + dense_x = paddle.randn((1, 4, 4, 4, 3)) + sparse_x = dense_x.to_sparse_coo(4) + kernel_sizes = [3, 3, 3] + paddings = [0, 0, 0] + strides = [1, 1, 1] + out = paddle.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings) + #[1, 2, 2, 2, 3] """ assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode" - assert x.is_sparse_coo( + assert ( + x.is_sparse_coo() ), "Currently, sparse.relu only support the input of SparseCooTensor" - assert data_format == 'NDHWC', "Currently, sparse.max_pool3d only support data format of 'NDHWC'" + assert ( + data_format == 'NDHWC' + ), "Currently, sparse.max_pool3d only support data format of 'NDHWC'" kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') if stride is None: @@ -87,12 +90,11 @@ def max_pool3d(x, channel_last = True - padding, padding_algorithm = _update_padding_nd(padding, - 3, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 3, channel_last=channel_last, ceil_mode=ceil_mode + ) - #TODO(zkh2016): remove the dependency on dilation from the backend + # TODO(zkh2016): remove the dependency on dilation from the backend dilation = [1, 1, 1] return _C_ops.sparse_maxpool(x, kernel_size, padding, dilation, stride) diff --git a/python/paddle/sparse/nn/layer/conv.py b/python/paddle/sparse/nn/layer/conv.py index cafa4a9252b..faf0f86f2d1 100644 --- a/python/paddle/sparse/nn/layer/conv.py +++ b/python/paddle/sparse/nn/layer/conv.py @@ -23,23 +23,26 @@ __all__ = [] class _Conv3D(Layer): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - subm=False, - key=None, - padding_mode='zeros', - weight_attr=None, - bias_attr=None, - data_format="NDHWC"): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + subm=False, + key=None, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NDHWC", + ): super(_Conv3D, self).__init__() - assert weight_attr is not False, "weight_attr should not be False in Conv." + assert ( + weight_attr is not False + ), "weight_attr should not be False in Conv." 
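        # Note: the checks in this constructor fail fast on configurations the
        # sparse conv kernels do not support yet: groups other than 1,
        # padding_mode other than 'zeros', and data_format other than 'NDHWC'.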
self._param_attr = weight_attr self._bias_attr = bias_attr self._groups = groups @@ -49,56 +52,66 @@ class _Conv3D(Layer): self._subm = subm self._key = key - assert padding_mode == 'zeros', "Currently, only support padding_mode='zeros'" + assert ( + padding_mode == 'zeros' + ), "Currently, only support padding_mode='zeros'" assert groups == 1, "Currently, only support groups=1" valid_format = {'NDHWC'} if data_format not in valid_format: raise ValueError( - "data_format must be one of {}, but got data_format='{}'". - format(valid_format, data_format)) + "data_format must be one of {}, but got data_format='{}'".format( + valid_format, data_format + ) + ) channel_last = data_format == "NDHWC" dims = 3 self._stride = utils.convert_to_list(stride, dims, 'stride') self._dilation = utils.convert_to_list(dilation, dims, 'dilation') - self._kernel_size = utils.convert_to_list(kernel_size, dims, - 'kernel_size') + self._kernel_size = utils.convert_to_list( + kernel_size, dims, 'kernel_size' + ) self._padding = padding self._padding_mode = padding_mode self._updated_padding, self._padding_algorithm = _update_padding_nd( - padding, channel_last, dims) + padding, channel_last, dims + ) # the sparse conv restricts the shape is [D, H, W, in_channels, out_channels] filter_shape = self._kernel_size + [ - self._in_channels, self._out_channels + self._in_channels, + self._out_channels, ] def _get_default_param_initializer(): filter_elem_num = np.prod(self._kernel_size) * self._in_channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std) self.weight = self.create_parameter( shape=filter_shape, attr=self._param_attr, - default_initializer=_get_default_param_initializer()) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._out_channels], - is_bias=True) + default_initializer=_get_default_param_initializer(), + ) + self.bias = self.create_parameter( + attr=self._bias_attr, shape=[self._out_channels], is_bias=True + ) def forward(self, x): - out = F.conv._conv3d(x, - self.weight, - bias=self.bias, - stride=self._stride, - padding=self._updated_padding, - dilation=self._dilation, - groups=self._groups, - subm=self._subm, - key=self._key, - data_format=self._data_format) + out = F.conv._conv3d( + x, + self.weight, + bias=self.bias, + stride=self._stride, + padding=self._updated_padding, + dilation=self._dilation, + groups=self._groups, + subm=self._subm, + key=self._key, + data_format=self._data_format, + ) return out def extra_repr(self): @@ -122,11 +135,11 @@ class Conv3D(_Conv3D): **Sparse Convlution3d Layer** The Sparse convolution3d layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and - Output(Output) are multidimensional SparseCooTensors with a shape of + Output(Output) are multidimensional SparseCooTensors with a shape of :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, - and W is the width of the feature. If bias attribution is provided, - bias is added to the output of the convolution. + and W is the width of the feature. If bias attribution is provided, + bias is added to the output of the convolution. For each input :math:`X`, the equation is: .. math:: @@ -150,7 +163,7 @@ class Conv3D(_Conv3D): stride_D = stride_H = stride_W = stride. The default value is 1. padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms. 1. 
a string in ['valid', 'same'].
-            2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` 
+            2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding`
             3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
             4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
             5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension are also included. Each pair of integers corresponds to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
@@ -208,63 +221,65 @@ class Conv3D(_Conv3D):
         .. code-block:: python
 
             import paddle
-            from paddle.fluid.framework import _test_eager_guard
-
-            with _test_eager_guard():
-                indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
-                values = [[1], [2], [3], [4]]
-                indices = paddle.to_tensor(indices, dtype='int32')
-                values = paddle.to_tensor(values, dtype='float32')
-                dense_shape = [1, 1, 3, 4, 1]
-                sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
-                conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
-                y = conv(sparse_x)
-                print(y.shape)
-                # (1, 1, 1, 2, 1)
+
+            indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+            values = [[1], [2], [3], [4]]
+            indices = paddle.to_tensor(indices, dtype='int32')
+            values = paddle.to_tensor(values, dtype='float32')
+            dense_shape = [1, 1, 3, 4, 1]
+            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
+            conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
+            y = conv(sparse_x)
+            print(y.shape)
+            # (1, 1, 1, 2, 1)
     """
 
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride=1,
-                 padding=0,
-                 dilation=1,
-                 groups=1,
-                 padding_mode='zeros',
-                 weight_attr=None,
-                 bias_attr=None,
-                 data_format="NDHWC"):
-        super(Conv3D, self).__init__(in_channels,
-                                     out_channels,
-                                     kernel_size,
-                                     stride=stride,
-                                     padding=padding,
-                                     dilation=dilation,
-                                     groups=groups,
-                                     subm=False,
-                                     key=None,
-                                     padding_mode=padding_mode,
-                                     weight_attr=weight_attr,
-                                     bias_attr=bias_attr,
-                                     data_format=data_format)
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride=1,
+        padding=0,
+        dilation=1,
+        groups=1,
+        padding_mode='zeros',
+        weight_attr=None,
+        bias_attr=None,
+        data_format="NDHWC",
+    ):
+        super(Conv3D, self).__init__(
+            in_channels,
+            out_channels,
+            kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+            subm=False,
+            key=None,
+            padding_mode=padding_mode,
+            weight_attr=weight_attr,
+            bias_attr=bias_attr,
+            data_format=data_format,
+        )
 
 
 class SubmConv3D(_Conv3D):
     r"""
-    **Sparse Submanifold Convlution3d Layer**
-    The Sparse submanifold convolution3d layer calculates the output based on the input, filter
+    **Submanifold Sparse Convolution3d Layer**
+    The submanifold sparse convolution3d layer calculates the output based on the input, filter
     and strides, paddings, dilations, groups parameters. Input(Input) and
-    Output(Output) are multidimensional SparseCooTensors with a shape of
+    Output(Output) are multidimensional SparseCooTensors with a shape of
     :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
     channels, D is the depth of the feature, H is the height of the feature,
-    and W is the width of the feature. If bias attribution is provided,
+    and W is the width of the feature. If bias is provided,
     bias is added to the output of the convolution.
     For each input :math:`X`, the equation is:
 
     .. math::
 
-        Out =(W \ast X + b
+        Out = W \ast X + b
 
     In the above equation:
 
@@ -283,7 +298,7 @@ class SubmConv3D(_Conv3D):
         stride_D = stride_H = stride_W = stride. The default value is 1.
         padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
             1. a string in ['valid', 'same'].
-            2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` 
+            2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding`
             3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
             4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
             5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension are also included. Each pair of integers corresponds to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
@@ -297,7 +312,7 @@ class SubmConv3D(_Conv3D):
         of the input channels, while the second half of the filters is only
         connected to the second half of the input channels. The default value is 1.
         padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Currently only support ``'zeros'``.
-        key(str, optional): the key is used to save or use the same rulebook,
+        key(str, optional): the key is used to save or use the same rulebook,
             the definition and role of rulebook refers to
             https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
             default value is None.
@@ -345,44 +360,46 @@ class SubmConv3D(_Conv3D):
         .. 
code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] - values = [[1], [2], [3], [4]] - dense_shape = [1, 1, 3, 4, 1] - indices = paddle.to_tensor(indices, dtype='int32') - values = paddle.to_tensor(values, dtype='float32') - sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) - subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3)) - y = subm_conv(sparse_x) - print(y.shape) - # (1, 1, 3, 4, 1) + + indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] + values = [[1], [2], [3], [4]] + dense_shape = [1, 1, 3, 4, 1] + indices = paddle.to_tensor(indices, dtype='int32') + values = paddle.to_tensor(values, dtype='float32') + sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) + subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3)) + y = subm_conv(sparse_x) + print(y.shape) + # (1, 1, 3, 4, 1) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - key=None, - weight_attr=None, - bias_attr=None, - data_format="NDHWC"): - super(SubmConv3D, self).__init__(in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - subm=True, - key=key, - padding_mode=padding_mode, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + key=None, + weight_attr=None, + bias_attr=None, + data_format="NDHWC", + ): + super(SubmConv3D, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + subm=True, + key=key, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py index 2b0dba5a591..34ed96f9e43 100644 --- a/python/paddle/sparse/nn/layer/norm.py +++ b/python/paddle/sparse/nn/layer/norm.py @@ -83,18 +83,16 @@ class BatchNorm(paddle.nn.BatchNorm1D): .. code-block:: python import paddle - from paddle.fluid.framework import _test_eager_guard - - with _test_eager_guard(): - paddle.seed(123) - channels = 3 - x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32') - dense_x = paddle.to_tensor(x_data) - sparse_x = dense_x.to_sparse_coo(4) - batch_norm = paddle.sparse.nn.BatchNorm(channels) - batch_norm_out = batch_norm(sparse_x) - print(batch_norm_out.shape) - # [1, 6, 6, 6, 3] + + paddle.seed(123) + channels = 3 + x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32') + dense_x = paddle.to_tensor(x_data) + sparse_x = dense_x.to_sparse_coo(4) + batch_norm = paddle.sparse.nn.BatchNorm(channels) + batch_norm_out = batch_norm(sparse_x) + print(batch_norm_out.shape) + # [1, 6, 6, 6, 3] """ def __init__( @@ -271,6 +269,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): will create ParamAttr as bias_attr. If the Initializer of the bias_attr is not set, the bias is initialized zero. If it is set to False, this layer will not have trainable bias parameter. Default: None. + data_format(str, optional): Specify the input data format, may be "NCHW". Default "NCHW". + name(str, optional): Name for the BatchNorm, default is None. 
For more information, please refer to :ref:`api_guide_Name`.

    Shapes:
        input: Tensor whose dimension is from 2 to 5.

@@ -283,10 +283,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
          # required: gpu

          import paddle
          import paddle.sparse.nn as nn
-          import numpy as np

-          x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-          x = paddle.to_tensor(x)
+          x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]], dtype='float32')
          x = x.to_sparse_coo(len(x.shape)-1)

          if paddle.is_compiled_with_cuda():
diff --git a/python/paddle/sparse/nn/layer/pooling.py b/python/paddle/sparse/nn/layer/pooling.py
index ee15bf6f793..340e7e5e1fc 100644
--- a/python/paddle/sparse/nn/layer/pooling.py
+++ b/python/paddle/sparse/nn/layer/pooling.py
@@ -61,26 +61,26 @@ class MaxPool3D(Layer):
        .. code-block:: python

          import paddle
-          from paddle.fluid.framework import _test_eager_guard

-          with _test_eager_guard():
-            dense_x = paddle.randn((2, 3, 6, 6, 3))
-            sparse_x = dense_x.to_sparse_coo(4)
-            max_pool3d = paddle.sparse.nn.MaxPool3D(
-                kernel_size=3, data_format='NDHWC')
-            out = max_pool3d(sparse_x)
-            #shape=[2, 1, 2, 2, 3]
+          dense_x = paddle.randn((2, 3, 6, 6, 3))
+          sparse_x = dense_x.to_sparse_coo(4)
+          max_pool3d = paddle.sparse.nn.MaxPool3D(
+              kernel_size=3, data_format='NDHWC')
+          out = max_pool3d(sparse_x)
+          #shape=[2, 1, 2, 2, 3]
    """

-    def __init__(self,
-                 kernel_size,
-                 stride=None,
-                 padding=0,
-                 return_mask=False,
-                 ceil_mode=False,
-                 data_format="NDHWC",
-                 name=None):
+    def __init__(
+        self,
+        kernel_size,
+        stride=None,
+        padding=0,
+        return_mask=False,
+        ceil_mode=False,
+        data_format="NDHWC",
+        name=None,
+    ):
         super(MaxPool3D, self).__init__()
         self.ksize = kernel_size
         self.stride = stride
@@ -91,14 +91,17 @@ class MaxPool3D(Layer):
         self.name = name

     def forward(self, x):
-        return F.max_pool3d(x,
-                            kernel_size=self.ksize,
-                            stride=self.stride,
-                            padding=self.padding,
-                            ceil_mode=self.ceil_mode,
-                            data_format=self.data_format,
-                            name=self.name)
+        return F.max_pool3d(
+            x,
+            kernel_size=self.ksize,
+            stride=self.stride,
+            padding=self.padding,
+            ceil_mode=self.ceil_mode,
+            data_format=self.data_format,
+            name=self.name,
+        )

     def extra_repr(self):
         return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format(
-            **self.__dict__)
+            **self.__dict__
+        )
diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py
index 7e2883bddd4..32825c32b36 100644
--- a/python/paddle/sparse/unary.py
+++ b/python/paddle/sparse/unary.py
@@ -15,7 +15,11 @@

 import numpy as np

 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import dygraph_only, core, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import (
+    dygraph_only,
+    core,
+    convert_np_dtype_to_dtype_,
+)

 __all__ = []

@@ -33,7 +37,7 @@
 def sin(x, name=None):
     """
     Calculate elementwise sin of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor.
-    
+
     .. math::

         out = sin(x)

@@ -54,7 +58,7 @@ def sin(x, name=None):
         dense_x = paddle.to_tensor([-2., 0., 1.])
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.sin(sparse_x)
-        
+
     """
     return _C_ops.sparse_sin(x)

@@ -63,7 +67,7 @@
 def tan(x, name=None):
     """
     Calculate elementwise tan of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor.
-    
+
    .. 
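code-block:: python

        # A hedged editor sketch, not part of the original patch: tan acts
        # elementwise on the stored values only; the input mirrors the
        # Examples section below and the printed numbers are approximate.
        import paddle

        dense_x = paddle.to_tensor([-2., 0., 1.])
        sparse_x = dense_x.to_sparse_coo(1)
        out = paddle.sparse.tan(sparse_x)
        print(out.values())
        # approximately [2.18503986, 1.55740774]; the zero entry is not stored

    .. 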
math:: out = tan(x) @@ -84,7 +88,7 @@ def tan(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.tan(sparse_x) - + """ return _C_ops.sparse_tan(x) @@ -93,7 +97,7 @@ def tan(x, name=None): def asin(x, name=None): """ Calculate elementwise asin of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = asin(x) @@ -114,7 +118,7 @@ def asin(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.asin(sparse_x) - + """ return _C_ops.sparse_asin(x) @@ -154,7 +158,7 @@ def transpose(x, perm, name=None): def atan(x, name=None): """ Calculate elementwise atan of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = atan(x) @@ -175,7 +179,7 @@ def atan(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.atan(sparse_x) - + """ return _C_ops.sparse_atan(x) @@ -184,7 +188,7 @@ def atan(x, name=None): def sinh(x, name=None): """ Calculate elementwise sinh of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = sinh(x) @@ -205,7 +209,7 @@ def sinh(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.sinh(sparse_x) - + """ return _C_ops.sparse_sinh(x) @@ -214,7 +218,7 @@ def sinh(x, name=None): def asinh(x, name=None): """ Calculate elementwise asinh of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = asinh(x) @@ -235,7 +239,7 @@ def asinh(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.asinh(sparse_x) - + """ return _C_ops.sparse_asinh(x) @@ -244,7 +248,7 @@ def asinh(x, name=None): def atanh(x, name=None): """ Calculate elementwise atanh of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = atanh(x) @@ -265,7 +269,7 @@ def atanh(x, name=None): dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.atanh(sparse_x) - + """ return _C_ops.sparse_atanh(x) @@ -274,7 +278,7 @@ def atanh(x, name=None): def tanh(x, name=None): """ Calculate elementwise tanh of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = tanh(x) @@ -291,11 +295,11 @@ def tanh(x, name=None): .. code-block:: python import paddle - + dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.tanh(sparse_x) - + """ return _C_ops.sparse_tanh(x) @@ -304,7 +308,7 @@ def tanh(x, name=None): def square(x, name=None): """ Calculate elementwise square of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. math:: out = square(x) @@ -321,11 +325,11 @@ def square(x, name=None): .. code-block:: python import paddle - + dense_x = paddle.to_tensor([-2., 0., 1.]) sparse_x = dense_x.to_sparse_coo(1) out = paddle.sparse.square(sparse_x) - + """ return _C_ops.sparse_square(x) @@ -334,7 +338,7 @@ def square(x, name=None): def sqrt(x, name=None): """ Calculate elementwise sqrt of SparseTensor, requiring x to be a SparseCooTensor or SparseCsrTensor. - + .. 
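code-block:: python

        # A hedged editor sketch, not part of the original patch: sqrt acts
        # only on the stored non-zero values, and a negative stored value
        # yields nan, as with dense paddle.sqrt. The input mirrors the
        # Examples section below.
        import paddle

        dense_x = paddle.to_tensor([-2., 0., 1.])
        sparse_x = dense_x.to_sparse_coo(1)
        out = paddle.sparse.sqrt(sparse_x)
        print(out.values())
        # approximately [nan, 1.]; the zero entry is not stored

    .. 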
math:: out = sqrt(x)

@@ -355,7 +359,7 @@ def sqrt(x, name=None):
         dense_x = paddle.to_tensor([-2., 0., 1.])
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.sqrt(sparse_x)
-        
+
     """
     return _C_ops.sparse_sqrt(x)

@@ -385,7 +389,7 @@ def log1p(x, name=None):
         dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.log1p(sparse_x)
-        
+
     """
     return _C_ops.sparse_log1p(x)

@@ -398,7 +402,7 @@ def cast(x, index_dtype=None, value_dtype=None, name=None):

     Parameters:
         x (Tensor): The input Sparse Tensor with data type float32, float64.
-        index_dtype (np.dtype|str, optional): Data type of the index of SparseCooTensor, 
+        index_dtype (np.dtype|str, optional): Data type of the index of SparseCooTensor,
             or crows/cols of SparseCsrTensor. Can be uint8, int8, int16, int32, int64.
         value_dtype (np.dtype|str, optional): Data type of the value of SparseCooTensor,
             SparseCsrTensor. Can be bool, float16, float32, float64, int8, int32, int64, uint8.
@@ -416,7 +420,7 @@ def cast(x, index_dtype=None, value_dtype=None, name=None):
         dense_x = paddle.to_tensor([-2, 0, 1])
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.cast(sparse_x, 'int32', 'float64')
-        
+
     """
     if index_dtype and not isinstance(index_dtype, core.VarDesc.VarType):
         index_dtype = convert_np_dtype_to_dtype_(index_dtype)
@@ -451,7 +455,7 @@ def pow(x, factor, name=None):
         dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.pow(sparse_x, 2)
-        
+
     """
     return _C_ops.sparse_pow(x, float(factor))

@@ -481,7 +485,7 @@ def neg(x, name=None):
         dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.neg(sparse_x)
-        
+
     """
     return _C_ops.sparse_scale(x, -1.0, 0.0, True)

@@ -511,18 +515,20 @@ def abs(x, name=None):
         dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.abs(sparse_x)
-        
+
     """
     return _C_ops.sparse_abs(x)

 @dygraph_only
-def coalesce(x):
+def coalesce(x, name=None):
     r"""
     The coalesce operator sorts the indices and merges duplicate entries; after coalescing, the indices of x are sorted and unique.

     Parameters:
         x (Tensor): the input SparseCooTensor.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         Tensor: return the coalesced SparseCooTensor.

@@ -540,7 +546,7 @@ def coalesce(x):
         #[[0, 1], [1, 2]]
         print(sp_x.values())
         #[3.0, 3.0]
-    """
+    """
     return _C_ops.sparse_coalesce(x)

@@ -570,7 +576,7 @@ def rad2deg(x, name=None):
         dense_x = paddle.to_tensor([3.142, 0., -3.142])
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.rad2deg(sparse_x)
-        
+
     """
     if x.dtype in _int_dtype_:
         x = _C_ops.sparse_cast(x, None, core.VarDesc.VarType.FP32)
@@ -582,7 +588,7 @@ def deg2rad(x, name=None):
     """
     Convert each of the elements of input x from degrees to angles in radians,
     requiring x to be a SparseCooTensor or SparseCsrTensor.
-    
+
     .. math::

         deg2rad(x) = \pi * x / 180

@@ -603,7 +609,7 @@ def deg2rad(x, name=None):
         dense_x = paddle.to_tensor([-180, 0, 180])
         sparse_x = dense_x.to_sparse_coo(1)
         out = paddle.sparse.deg2rad(sparse_x)
-        
+
     """
     if x.dtype in _int_dtype_:
         x = _C_ops.sparse_cast(x, None, core.VarDesc.VarType.FP32)
-- 
GitLab