Unverified · Commit ade9bc04 authored by wawltor, committed by GitHub

Fix some English doc APIs, shenze tools group (#20300)

* test=document_fix
Fix English doc APIs, involving the ops greater_equal, greater_than, less_equal, not_equal,
rank, rsqrt, diag, linspace, reduce_all, reduce_any, sign, where, zeros_like, unique_with_counts.

* Fix some format problems in the ops sign and greater_than.
test=develop
test=document_fix

* Fix the example of zeros_like, and update api.spec
test=develop
test=document_fix
Parent b347ca08
@@ -167,8 +167,8 @@ paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name
paddle.fluid.layers.reduce_max (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '10023caec4d7f78c3b901f023a1feaa7'))
paddle.fluid.layers.reduce_min (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '1a1c91625ce3c32646f69ca10d4d1da7'))
paddle.fluid.layers.reduce_prod (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'b386471f0476c80c61d8c8672278063d'))
paddle.fluid.layers.reduce_all (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '8ab17ab51f68a6e76302b27f928cedf3'))
paddle.fluid.layers.reduce_any (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '0483ac3b7a99e879ccde583ae8d7a60d'))
paddle.fluid.layers.reduce_all (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'e365c540ec448a73e2b2e75796ed029e'))
paddle.fluid.layers.reduce_any (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'fbc9e73da7a2964ba5693864aed36abb'))
paddle.fluid.layers.sequence_first_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '227a75392ae194de0504f5c6812dade9'))
paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '34372f58331247749e8b0a1663cf233b'))
paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '39fbc5437be389f6c0c769f82fc1fba2'))
@@ -245,7 +245,7 @@ paddle.fluid.layers.pad2d (ArgSpec(args=['input', 'paddings', 'mode', 'pad_value
paddle.fluid.layers.unstack (ArgSpec(args=['x', 'axis', 'num'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b0c4ca08d4eb295189e1b107c920d093'))
paddle.fluid.layers.sequence_enumerate (ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b870fed41abd2aecf929ece65f555fa1'))
paddle.fluid.layers.unique (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', 'cab0b06e5683875f12f0efc62fa230a9'))
paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '1cb59c65b41766116944b8ed1e6ad345'))
paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '4496682f302007019e458a2f30d8a7c3'))
paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e93a1b102ab64b247c1b774e60d4c0d0'))
paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f47f9d207ac60b6f294087bcb1b64ae8'))
paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
@@ -266,7 +266,7 @@ paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaul
paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2'))
paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'bf61c8f79d795a8371bdb3b5468aa82b'))
paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '096df0e0273145ab80ed119a4c294db3'))
paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'a4492cf0393c6f70e4e25c681dcd73f4'))
paddle.fluid.layers.size (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'cf2e156beae36378722666c4c33bebfe'))
paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '12db97c6c459c0f240ec7006737174f2'))
paddle.fluid.layers.logical_or (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '15adbc561618b7db69671e02009bea67'))
@@ -303,8 +303,8 @@ paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l
paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '7e5cac851fd9bad344230e1044b6a565'))
paddle.fluid.layers.fsp_matrix (ArgSpec(args=['x', 'y'], varargs=None, keywords=None, defaults=None), ('document', '3a4eb7cce366f5fd8bc38b42b6af5ba1'))
paddle.fluid.layers.continuous_value_model (ArgSpec(args=['input', 'cvm', 'use_cvm'], varargs=None, keywords=None, defaults=(True,)), ('document', 'b335b531931cc8b2d19c65980eadfc1e'))
paddle.fluid.layers.where (ArgSpec(args=['condition'], varargs=None, keywords=None, defaults=None), ('document', 'b1e1487760295e1ff55307b880a99e18'))
paddle.fluid.layers.sign (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'b56afe9ae3fc553c95d907fd7ef6c314'))
paddle.fluid.layers.where (ArgSpec(args=['condition'], varargs=None, keywords=None, defaults=None), ('document', '68810eedf448f2cb3abd46518dd46c39'))
paddle.fluid.layers.sign (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '9f19288d9a8dabcfd0bbb4fc032fa521'))
paddle.fluid.layers.deformable_conv (ArgSpec(args=['input', 'offset', 'mask', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'deformable_groups', 'im2col_step', 'param_attr', 'bias_attr', 'modulated', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, None, None, True, None)), ('document', '3e090f9e90b9c24d07348243bf137b56'))
paddle.fluid.layers.unfold (ArgSpec(args=['x', 'kernel_sizes', 'strides', 'paddings', 'dilations', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None)), ('document', '3f884662ad443d9ecc2b3734b4f61ad6'))
paddle.fluid.layers.deformable_roi_pooling (ArgSpec(args=['input', 'rois', 'trans', 'no_trans', 'spatial_scale', 'group_size', 'pooled_height', 'pooled_width', 'part_size', 'sample_per_part', 'trans_std', 'position_sensitive', 'name'], varargs=None, keywords=None, defaults=(False, 1.0, [1, 1], 1, 1, None, 1, 0.1, False, None)), ('document', 'e0e7bf35da2287efb015546f1b8350df'))
@@ -339,10 +339,10 @@ paddle.fluid.layers.has_inf (ArgSpec(args=['x'], varargs=None, keywords=None, de
paddle.fluid.layers.has_nan (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '129cf426e71452fe8276d616a6dc21ae'))
paddle.fluid.layers.isfinite (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'b9fff4ffc8d11934cde099f4c39bf841'))
paddle.fluid.layers.range (ArgSpec(args=['start', 'end', 'step', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '3e982b788b95f959eafeeb0696a3cbde'))
paddle.fluid.layers.linspace (ArgSpec(args=['start', 'stop', 'num', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '3663d1148946eed4c1c34c81be586b9e'))
paddle.fluid.layers.zeros_like (ArgSpec(args=['x', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd88a23bcdc443719b3953593f7cef14a'))
paddle.fluid.layers.linspace (ArgSpec(args=['start', 'stop', 'num', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '156e653497804566a43f6a53d48b08c4'))
paddle.fluid.layers.zeros_like (ArgSpec(args=['x', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '5432543db3ff898451aa3af6bb38ab56'))
paddle.fluid.layers.ones_like (ArgSpec(args=['x', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd18d42059c6b189cbd3fab2fcb206c15'))
paddle.fluid.layers.diag (ArgSpec(args=['diagonal'], varargs=None, keywords=None, defaults=None), ('document', '88a15e15f0098d549f07a01eaebf9ce3'))
paddle.fluid.layers.diag (ArgSpec(args=['diagonal'], varargs=None, keywords=None, defaults=None), ('document', '10f13aae2f0d3db88353d3ec7db4869b'))
paddle.fluid.layers.eye (ArgSpec(args=['num_rows', 'num_columns', 'batch_shape', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 'float32')), ('document', '60cdc70ae43ba69fae36d720ef3016a1'))
paddle.fluid.layers.While ('paddle.fluid.layers.control_flow.While', ('document', '50110155608a00f43d3d3fd1be41dcb4'))
paddle.fluid.layers.While.__init__ (ArgSpec(args=['self', 'cond', 'is_test', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -355,11 +355,11 @@ paddle.fluid.layers.increment (ArgSpec(args=['x', 'value', 'in_place'], varargs=
paddle.fluid.layers.array_write (ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)), ('document', '3f913b5069ad40bd85d89b33e4aa5939'))
paddle.fluid.layers.create_array (ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None), ('document', '556de793fdf24d515f3fc91260e2c048'))
paddle.fluid.layers.less_than (ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords=None, defaults=(None, None)), ('document', '04af32422c3a3d8f6040aeb406c82768'))
paddle.fluid.layers.less_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b6d952a9f6340a044cfb91c16aad842'))
paddle.fluid.layers.greater_than (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '55710e2fafeda70cd1b53d7509712499'))
paddle.fluid.layers.greater_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '14bff27b2be5e60eaa30e41925265beb'))
paddle.fluid.layers.less_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '04e5623dd39b4437b9b08e0ce11071ca'))
paddle.fluid.layers.greater_than (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '135352e24251238122bb7823dd4a49aa'))
paddle.fluid.layers.greater_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '44bdacd11299d72c0a52d2181e7ae6ca'))
paddle.fluid.layers.equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '788aa651e8b9fec79d16931ef3a33e90'))
paddle.fluid.layers.not_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '57adebb8858ffab6be2d86d0522b85dc'))
paddle.fluid.layers.not_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '8b76aaac4ba7cf9111750b9c2c9418cb'))
paddle.fluid.layers.array_read (ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None), ('document', 'caf0d94349cdc28e1bda3b8a19411ac0'))
paddle.fluid.layers.array_length (ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None), ('document', '6f24a9b872027634ad758ea2826c9727'))
paddle.fluid.layers.IfElse ('paddle.fluid.layers.control_flow.IfElse', ('document', 'a389f88e19c3b332c3afcbf7df4488a5'))
@@ -1042,24 +1042,28 @@ def less_than(x, y, force_cpu=None, cond=None):
@templatedoc()
def less_equal(x, y, cond=None):
"""
This layer returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
Args:
x(Variable): First operand of *less_equal*
y(Variable): Second operand of *less_equal*
cond(Variable|None): Optional output variable to store the result of *less_equal*
x(Variable): First input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
y(Variable): Second input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
cond(Variable, optional): If :attr:`None`, the op will create a variable as the output tensor, whose shape and data type \
are the same as input :attr:`x`. If not :attr:`None`, the op will use the given variable as the output tensor, whose shape \
and data type must be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
Variable: The tensor variable storing the output of *less_equal*.
Variable: The output tensor, with data type bool and the same shape as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False]
out1 = label <= limit #out1=[True, False]
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
out = fluid.layers.less_equal(x=label, y=limit)
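# A hedged sketch (not from the original docstring): reusing a pre-created
# variable as the output through the cond parameter; create_tensor usage assumed.
result = fluid.layers.create_tensor(dtype='bool')
fluid.layers.less_equal(x=label, y=limit, cond=result)  # result holds the comparison output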
"""
helper = LayerHelper("less_equal", **locals())
if cond is None:
@@ -1082,24 +1086,27 @@ def less_equal(x, y, cond=None):
@templatedoc()
def greater_than(x, y, cond=None):
"""
This layer returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
Args:
x(Variable): First operand of *greater_than*
y(Variable): Second operand of *greater_than*
cond(Variable|None): Optional output variable to store the result of *greater_than*
x(Variable): First input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
y(Variable): Second input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
cond(Variable, optional): If :attr:`None`, the op will create a variable as the output tensor, whose shape and data type \
are the same as input :attr:`x`. If not :attr:`None`, the op will use the given variable as the output tensor, whose shape \
and data type must be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
Variable: The tensor variable storing the output of *greater_than*.
Variable: The output tensor, with data type bool and the same shape as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
out = fluid.layers.greater_than(x=label, y=limit)
import numpy as np
label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True]
out1 = label > limit #out1=[False, True]
"""
helper = LayerHelper("greater_than", **locals())
if cond is None:
@@ -1122,24 +1129,28 @@ def greater_than(x, y, cond=None):
@templatedoc()
def greater_equal(x, y, cond=None):
"""
This layer returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
Args:
x(Variable): First operand of *greater_equal*
y(Variable): Second operand of *greater_equal*
cond(Variable|None): Optional output variable to store the result of *greater_equal*
x(Variable): First input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
y(Variable): Second input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
cond(Variable, optional): If :attr:`None`, the op will create a variable as the output tensor, whose shape and data type \
are the same as input :attr:`x`. If not :attr:`None`, the op will use the given variable as the output tensor, whose shape \
and data type must be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
Variable: The tensor variable storing the output of *greater_equal*.
Variable: The output tensor, with data type bool and the same shape as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
out = fluid.layers.greater_equal(x=label, y=limit)
label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False]
out_1 = label >= limit #out_1=[True, False]
"""
helper = LayerHelper("greater_equal", **locals())
@@ -1193,15 +1204,17 @@ def equal(x, y, cond=None):
def not_equal(x, y, cond=None):
"""
This layer returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloader operator `!=`.
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
Args:
x(Variable): First operand of *not_equal*
y(Variable): Second operand of *not_equal*
cond(Variable|None): Optional output variable to store the result of *not_equal*
x(Variable): First input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
y(Variable): Second input to compare, an N-D tensor. The input data type should be float32, float64, int32 or int64.
cond(Variable, optional): If :attr:`None`, the op will create a variable as the output tensor, whose shape and data type \
are the same as input :attr:`x`. If not :attr:`None`, the op will use the given variable as the output tensor, whose shape \
and data type must be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
Variable: The tensor variable storing the output of *not_equal*.
Variable: The output tensor, with data type bool and the same shape as input :attr:`x`.
Examples:
.. code-block:: python
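import paddle.fluid as fluid
import numpy as np
# A hedged sketch (the original example is truncated in this diff); values assumed:
label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([3, 3], dtype='int32'))
out = fluid.layers.not_equal(x=label, y=limit)  # out=[True, False]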
@@ -6092,23 +6092,23 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
def reduce_all(input, dim=None, keep_dim=False, name=None):
"""
Computes the ``logical and`` of tensor elements over the given dimension.
This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor.
dim (list|int|None): The dimension along which the logical and is computed.
input (Variable): The input variable, a Tensor or LoDTensor; the input data type should be `bool`.
dim (list|int, optional): The dimension along which the logical and is computed.
If :attr:`None`, compute the logical and over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true.
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer (optional). If set None, the layer
will be named automatically.
will be named automatically. The default value is None.
Returns:
Variable: The reduced Tensor variable.
Variable: The reduced tensor variable with ``logical and`` applied in the given dims; the output data type is bool.
Examples:
.. code-block:: python
@@ -6126,7 +6126,10 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
out = layers.reduce_all(x) # False
out = layers.reduce_all(x, dim=0) # [True, False]
out = layers.reduce_all(x, dim=-1) # [False, True]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
helper = LayerHelper('reduce_all', **locals())
@@ -6147,23 +6150,22 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
def reduce_any(input, dim=None, keep_dim=False, name=None):
"""
Computes the ``logical or`` of tensor elements over the given dimension.
This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor.
dim (list|int|None): The dimension along which the logical or is computed.
If :attr:`None`, compute the logical or over all elements of
input (Variable): The input variable, a Tensor or LoDTensor; the input data type should be `bool`.
dim (list|int, optional): The dimension along which the logical or is computed.
If :attr:`None`, compute the logical or over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true.
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer (optional). If set None, the layer
will be named automatically.
Returns:
Variable: The reduced Tensor variable.
Variable: The reduced tensor variable with ``logical or`` applied in the given dims; the output data type is bool.
Examples:
.. code-block:: python
@@ -6181,8 +6183,11 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
out = layers.reduce_any(x) # True
out = layers.reduce_any(x, dim=0) # [True, False]
out = layers.reduce_any(x, dim=-1) # [True, False]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_any(x, dim=1,
keep_dim=True) # [[True], [False]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
helper = LayerHelper('reduce_any', **locals())
@@ -12535,23 +12540,21 @@ def shape(input):
def rank(input):
"""
**Rank Layer**
Returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.
This OP returns the number of dimensions for a tensor, which is a 0-D int32 tensor.
Args:
input (Variable): The input variable.
input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
Returns:
Variable: The rank of the input variable.
Variable: A 0-D int32 tensor holding the number of dimensions of the input variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3, 100, 100], dtype="float32")
rank = fluid.layers.rank(input) # 4
input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
rank = fluid.layers.rank(input)  # the value of rank is 3
"""
ndims = len(input.shape)
@@ -15203,14 +15206,11 @@ def where(condition):
"""
Return an int64 tensor of rank 2, specifying the coordinates of the true elements in `condition`.
The output's first dimension is the number of true elements, and its second dimension is the rank (number of dimensions) of `condition`.
If there are zero true elements, an empty tensor is generated.
Args:
condition(Variable): A bool tensor with rank at least 1.
condition(Variable): A bool tensor with rank at least 1, the data type is bool.
Returns:
Variable: The tensor variable storing a 2-D tensor.
Variable: A 2-D int64 tensor holding the coordinates of all true elements.
Examples:
.. code-block:: python
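import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# A hedged sketch (the original example is truncated in this diff); values assumed:
condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
condition = layers.cast(condition, 'bool')  # condition=[True, False, True]
out = layers.where(condition)  # out=[[0], [2]]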
@@ -15247,15 +15247,14 @@ def where(condition):
def sign(x):
"""
**sign**
This function returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
This OP returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Variable|numpy.ndarray): The input tensor.
x(Variable|numpy.ndarray): The input, an N-D tensor or N-D numpy array; \
the input data type is float32 or float64.
Returns:
Variable: The output sign tensor with identical shape and dtype to `x`.
Variable: The output sign tensor, with the same shape and data type as input :attr:`x`.
Examples:
.. code-block:: python
@@ -15263,9 +15262,8 @@ def sign(x):
import paddle.fluid as fluid
import numpy as np
# [1, 0, -1]
data = fluid.layers.sign(np.array([3, 0, -2], dtype='float32'))
# [1.0, 0.0, -1.0]
data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
"""
helper = LayerHelper("sign", **locals())
@@ -15331,18 +15329,21 @@ def unique(x, dtype='int32'):
def unique_with_counts(x, dtype='int32'):
"""
**unique_with_counts**
This OP returns a unique tensor for `x`, a count tensor giving the count of each unique value in the raw input, \
and an index tensor pointing into the unique tensor.
Return a unique tensor for `x` and an index tensor pointing to this unique tensor.
**NOTICE**: This op is supported on CPU only, and supports only the Tensor variable type.
Args:
x(Variable): A 1-D input tensor.
dtype(np.dtype|core.VarDesc.VarType|str): The type of index tensor: int32, int64.
x(Variable): A 1-D input tensor with shape :math:`[N]`; the input data type is float32, float64, int32 or int64.
dtype(np.dtype|core.VarDesc.VarType|str): The type of the count and index tensors, int32 or int64. Default value is int32.
Returns:
tuple: (out, index, count). `out` is the unique tensor for `x`, with identical dtype to `x`, and \
`index` is an index tensor pointing to `out`, by which user can recover the original `x` tensor, \
`count` is count of unqiue element in the `x`.
tuple: A tuple ``(out, index, count)`` of Tensor variables. :attr:`out` is the unique tensor for input :attr:`x`, with shape \
:math:`[K]`, where `K` may differ from the `N` in the shape of :attr:`x`; its data type is the same as :attr:`x`. :attr:`index` is an \
index tensor pointing into :attr:`out`, with shape :math:`[N]`, the same as input :attr:`x`. :attr:`count` is the count of each unique \
element in :attr:`x`, with shape :math:`[K]`, the same as :attr:`out`. The data type of :attr:`index` and :attr:`count` is int32 or int64.
Examples:
.. code-block:: python
@@ -15352,6 +15353,7 @@ def unique_with_counts(x, dtype='int32'):
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
# count is [1, 3, 1, 1]
# x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
"""
if not (dtype == 'int32' or dtype == 'int64'):
raise TypeError(
@@ -896,18 +896,21 @@ def range(start, end, step, dtype):
def linspace(start, stop, num, dtype):
"""
Return fixed number of evenly spaced values within a given interval.
First entry is start, and last entry is stop. In the case when Num is 1, only Start is returned. Like linspace function of numpy.
This OP returns a fixed number of evenly spaced values within a given interval.
Args:
start(float|Variable): First entry in the sequence. It is a float scalar, or a tensor of shape [1] with type 'float32'|'float64'.
stop(float|Variable): Last entry in the sequence. It is a float scalar, or a tensor of shape [1] with type 'float32'|'float64'.
num(int|Variable): Number of entry in the sequence. It is an int scalar, or a tensor of shape [1] with type int32.
dtype(string): 'float32'|'float64', the data type of the output tensor.
start(float|Variable): The first entry in the range. It is a float scalar, \
or a tensor of shape [1] with data type float32 or float64.
stop(float|Variable): The last entry in the range. It is a float scalar, \
or a tensor of shape [1] with data type float32 or float64.
num(int|Variable): The number of entries in the sequence. It is an int scalar, \
or a tensor of shape [1] with data type int32.
dtype(string): The data type of the output tensor, either 'float32' or 'float64'.
Returns:
Variable: The tensor variable storing a 1-D tensor.
Variable: A 1-D tensor with the fixed number of evenly spaced values, with shape :math:`[num]` and data type \
float32 or float64 as set by :attr:`dtype`. If :attr:`num` is 1, the output tensor contains only the value of \
input :attr:`start`.
Examples:
.. code-block:: python
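import paddle.fluid as fluid
# A hedged sketch (the original example is truncated in this diff):
data = fluid.layers.linspace(0, 10, 5, 'float32')  # [0.0, 2.5, 5.0, 7.5, 10.0]
data = fluid.layers.linspace(0, 10, 1, 'float32')  # [0.0]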
@@ -939,23 +942,24 @@ def linspace(start, stop, num, dtype):
def zeros_like(x, out=None):
"""
**zeros_like**
This function creates a zeros tensor which has identical shape and dtype
This OP creates a zeros tensor which has identical shape and dtype
to `x`.
Args:
x(Variable): The input tensor which specifies shape and dtype.
out(Variable): The output tensor.
x(Variable): The input tensor, which specifies the shape and dtype; the input data type could be bool, float32, float64, int32 or int64.
out(Variable, optional): If :attr:`None`, the op will create a variable as the output, whose data type and shape \
are the same as input :attr:`x`. If a tensor is given, its data type and shape must be the same as input :attr:`x`. \
Default value is :attr:`None`.
Returns:
Variable: The tensor variable storing the output.
Variable: An N-D tensor with the same shape as the input. If the input data type is bool, \
the elements are False; otherwise they are zero.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
x = fluid.data(name='x', dtype='float32', shape=[3])
data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]
"""
@@ -971,15 +975,15 @@ def zeros_like(x, out=None):
def diag(diagonal):
"""
**diag**
This function creates a square matrix which has diagonal values specified by `diagonal`.
This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.
Args:
diagonal(Variable|numpy.ndarray): The input tensor specifying diagonal values, should be of rank 1.
diagonal(Variable|numpy.ndarray): The input should be a 1-D tensor of shape :math:`[N]`, whose values \
specify the diagonal. The input data type should be float32, float64, int32 or int64.
Returns:
Variable: The tensor variable storing the square matrix.
Variable: The tensor variable storing the square matrix of shape :math:`[N, N]`, with the diagonal values \
given by input :attr:`diagonal`; the output data type is the same as the input data type.
Examples:
.. code-block:: python
@@ -990,7 +994,9 @@ def diag(diagonal):
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.diag(np.arange(3, 6, dtype='int32'))
diagonal = np.arange(3, 6, dtype='int32')
data = fluid.layers.diag(diagonal)
# diagonal.shape=(3,) data.shape=(3, 3)
"""