提交 3d006fa6 编写于 作者: L liym27 提交者: Aurelius84

fix English document of expand/pow/reshape/slice. test=develop, test=document_fix (#20280)

上级 2c71de24
...@@ -194,7 +194,7 @@ paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', ...@@ -194,7 +194,7 @@ paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label',
paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'cbe8940643ac80ef75e1abdfbdb09e88')) paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'cbe8940643ac80ef75e1abdfbdb09e88'))
paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29')) paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29'))
paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '98e7927f09ee2270535b29f048e481ec')) paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '98e7927f09ee2270535b29f048e481ec'))
paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'ca73fdc4551c5765c92eb00f24874289')) paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'd7a6d59e464a7ef1184eb6caefeb49f1'))
paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbac07662a6e22e8e299ced880c7775')) paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbac07662a6e22e8e299ced880c7775'))
paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b9bd3129d36a70e7c4385df51ff71c62')) paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b9bd3129d36a70e7c4385df51ff71c62'))
paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea')) paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea'))
...@@ -228,7 +228,7 @@ paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], ...@@ -228,7 +228,7 @@ paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'],
paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6fc86ed23b420c8a0f6c043563cf3937')) paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6fc86ed23b420c8a0f6c043563cf3937'))
paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308')) paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67')) paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ca34f88ff61cf2a7f4c97a493d6000d0')) paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '00d437d1e0d9450ea75a0495b93b54a7'))
paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d')) paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d'))
paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591')) paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4')) paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
...@@ -244,7 +244,7 @@ paddle.fluid.layers.unstack (ArgSpec(args=['x', 'axis', 'num'], varargs=None, ke ...@@ -244,7 +244,7 @@ paddle.fluid.layers.unstack (ArgSpec(args=['x', 'axis', 'num'], varargs=None, ke
paddle.fluid.layers.sequence_enumerate (ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b870fed41abd2aecf929ece65f555fa1')) paddle.fluid.layers.sequence_enumerate (ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b870fed41abd2aecf929ece65f555fa1'))
paddle.fluid.layers.unique (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', 'cab0b06e5683875f12f0efc62fa230a9')) paddle.fluid.layers.unique (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', 'cab0b06e5683875f12f0efc62fa230a9'))
paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '1cb59c65b41766116944b8ed1e6ad345')) paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '1cb59c65b41766116944b8ed1e6ad345'))
paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73')) paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e93a1b102ab64b247c1b774e60d4c0d0'))
paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f47f9d207ac60b6f294087bcb1b64ae8')) paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f47f9d207ac60b6f294087bcb1b64ae8'))
paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453')) paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '0c9c260e7738165a099f6a76da0b7814')) paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '0c9c260e7738165a099f6a76da0b7814'))
...@@ -261,7 +261,7 @@ paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed ...@@ -261,7 +261,7 @@ paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed
paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'c39b647b6cf08e058d96ee503d5284fe')) paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'c39b647b6cf08e058d96ee503d5284fe'))
paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', 'b24d0b21361c4bb8ef2cec8c26fb12b2')) paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', 'b24d0b21361c4bb8ef2cec8c26fb12b2'))
paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'f4b60847cb0f1ae00823ba6fb1b11310')) paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'f4b60847cb0f1ae00823ba6fb1b11310'))
paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '315b4870f294e33a27ecbdf440bed3ff')) paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2')) paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2'))
paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'bf61c8f79d795a8371bdb3b5468aa82b')) paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'bf61c8f79d795a8371bdb3b5468aa82b'))
paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '096df0e0273145ab80ed119a4c294db3')) paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '096df0e0273145ab80ed119a4c294db3'))
......
...@@ -8120,12 +8120,12 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): ...@@ -8120,12 +8120,12 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
""" """
Gives a new shape to the input Tensor without changing its data. This operator changes the shape of ``x`` without changing its data.
The target shape can be given by :attr:`shape` or :attr:`actual_shape`. The target shape can be given by ``shape`` or ``actual_shape``.
:attr:`shape` is a list of integer or tensor variable while :attr:`actual_shape` is a tensor When ``shape`` and ``actual_shape`` are set at the same time,
variable. :attr:`actual_shape` has a higher priority than :attr:`shape` ``actual_shape`` has a higher priority than ``shape``
if it is provided and it only contains integer, while :attr:`shape` still should be set correctly to but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
guarantee shape inference in compile-time. guarantee shape inference in compile-time.
Some tricks exist when specifying the target shape. Some tricks exist when specifying the target shape.
...@@ -8136,7 +8136,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): ...@@ -8136,7 +8136,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
2. 0 means the actual dimension value is going to be copied from the 2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed corresponding dimension of x. The index of 0s in shape can not exceed
Rank(X). the dimension of x.
Here are some examples to explain it. Here are some examples to explain it.
...@@ -8157,38 +8157,36 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): ...@@ -8157,38 +8157,36 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
besides -1, 0 means the actual dimension value is going to be copied from besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x. the corresponding dimension of x.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the future and only use :attr:`shape` instead. **Note**:
The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
Args: Args:
x(variable): The input tensor. x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
shape(list|tuple|Variable): The new shape. At most one dimension of the new shape can shape(list|tuple|Variable): Define the target shape. At most one dimension of the target shape can be -1.
be -1. If :attr:`shape` is a list or tuple, it can contain Variable or not and The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
the shape of Variable must be [1]. If ``shape`` is a Variable, it should be a 1-D Tensor .
actual_shape(variable, optional): An 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
actual_shape(variable): An optional input. If provided, reshape according to this given shape rather than ``shape`` specifying shape.
according to this given shape rather than That is to say ``actual_shape`` has a higher priority
:attr:`shape` specifying shape. That is to than ``shape(list|tuple)`` but not ``shape(Variable)``. \
say :attr:`actual_shape` has a higher priority This argument ``actual_shape`` will be removed in a future version. \
than :attr:`shape(list|tuple)` but not :attr:`shape(Variable)`. \ Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
This argument :attr:`actual_shape` will be removed in a future version. \ act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
Instructions for updating: :attr:`actual_shape` is deprecated, inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
only use :attr:`shape` instead. are the same variable. Otherwise, the input and output of
act (str): The non-linear activation to be applied to the reshaped tensor ``layers.reshape`` are different variable. Default False. Note that if ``x``
variable. is more than one OPs' input, ``inplace`` must be False.
inplace(bool): If ``inplace`` is `True`, the input and output of ``layers.reshape`` name(str, optional): The default value is None. Normally there is no need for user to set this property.
are the same variable, otherwise, the input and output of For more information, please refer to :ref:`api_guide_Name` .
``layers.reshape`` are different variables. Note that if :attr:`x`
is more than one layer's input, ``inplace`` must be :attr:`False`.
name (str): The name of this layer. It is optional.
Returns: Returns:
Variable: The reshaped tensor variable if :attr:`act` is None. It is a \ Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
new tensor variable if :attr:`inplace` is :attr:`False`, \
otherwise it is :attr:`x`. If :attr:`act` is not None, return \
the activated tensor variable.
Raises: Raises:
TypeError: if actual_shape is neither Variable nor None. TypeError: If actual_shape is neither Variable nor None.
ValueError: If more than one elements of ``shape`` is -1.
ValueError: If the element of ``shape`` is 0, the corresponding dimension should be less than or equal to the dimension of ``x``.
ValueError: If the elements in ``shape`` is negative except -1.
Examples: Examples:
.. code-block:: python .. code-block:: python
...@@ -8197,16 +8195,18 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): ...@@ -8197,16 +8195,18 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
# example 1: # example 1:
# attr shape is a list which doesn't contain tensor Variable. # attr shape is a list which doesn't contain tensor Variable.
data_1 = fluid.layers.data( data_1 = fluid.data(
name='data_1', shape=[2, 4, 6], dtype='float32') name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape( reshaped_1 = fluid.layers.reshape(
x=data_1, shape=[-1, 0, 3, 2], inplace=True) x=data_1, shape=[-1, 0, 3, 2], inplace=True)
# the shape of reshaped_1 is [2,4,3,2].
# example 2: # example 2:
# attr shape is a list which contains tensor Variable. # attr shape is a list which contains tensor Variable.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3) data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5) dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10]) reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
""" """
if not isinstance(x, Variable): if not isinstance(x, Variable):
raise TypeError( raise TypeError(
...@@ -11094,15 +11094,17 @@ def relu6(x, threshold=6.0, name=None): ...@@ -11094,15 +11094,17 @@ def relu6(x, threshold=6.0, name=None):
@templatedoc() @templatedoc()
def pow(x, factor=1.0, name=None): def pow(x, factor=1.0, name=None):
""" """
${comment} This is Pow Activation Operator.
:math:`out = x^{factor}`
Args: Args:
x(${x_type}): ${x_comment} x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
factor(float|Variable|1.0): The exponential factor of Pow. factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
will be named automatically.
Returns: Returns:
output(${out_type}): ${out_comment} Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
Examples: Examples:
...@@ -11110,14 +11112,16 @@ def pow(x, factor=1.0, name=None): ...@@ -11110,14 +11112,16 @@ def pow(x, factor=1.0, name=None):
import paddle.fluid as fluid import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32") x = fluid.data(name="x", shape=[32,32], dtype="float32")
# example 1: argument factor is float # example 1: argument factor is float
y_1 = fluid.layers.pow(x, factor=2.0) y_1 = fluid.layers.pow(x, factor=2.0)
# y_1 is x^{2.0}
# example 2: argument factor is Variable # example 2: argument factor is Variable
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0) factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor) y_2 = fluid.layers.pow(x, factor=factor_tensor)
# y_2 is x^{3.0}
""" """
helper = LayerHelper('pow', **locals()) helper = LayerHelper('pow', **locals())
inputs = {'X': x} inputs = {'X': x}
...@@ -11803,9 +11807,10 @@ def unstack(x, axis=0, num=None): ...@@ -11803,9 +11807,10 @@ def unstack(x, axis=0, num=None):
def expand(x, expand_times, name=None): def expand(x, expand_times, name=None):
"""Expand operator tiles the input by given times number. You should set times """
number for each dimension by providing attribute 'expand_times'. The rank of X This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
should be in [1, 6]. Please note that size of 'expand_times' must be the same The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same
with X's rank. Following is a use case: with X's rank. Following is a use case:
...@@ -11828,12 +11833,18 @@ def expand(x, expand_times, name=None): ...@@ -11828,12 +11833,18 @@ def expand(x, expand_times, name=None):
] ]
Args: Args:
x (Variable): A tensor with rank in [1, 6]. x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
expand_times (list|tuple|Variable): Expand times number for each dimension. expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``expand_times`` is a Variable, it should be a 1-D Tensor.
Expand times number for each dimension of ``x`` .
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns: Returns:
Variable: The expanded variable which is a LoDTensor. After expanding, size of each dimension of Output(Out) is equal to ithe size of the corresponding dimension of Input(X) multiplying the corresponding value given by expand_times. Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
Raises:
TypeError: The type of ``expand_times`` must be list, tuple or Variable.
ValueError: The elements of ``expand_times`` cannot be negative.
Examples: Examples:
.. code-block:: python .. code-block:: python
...@@ -11843,11 +11854,13 @@ def expand(x, expand_times, name=None): ...@@ -11843,11 +11854,13 @@ def expand(x, expand_times, name=None):
# example 1: # example 1:
data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0) data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2]) expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
# the shape of expanded_1 is [2, 6, 2].
# example 2: # example 2:
data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3) data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4) expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times) expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
# the shape of expanded_2 is [48, 56].
""" """
if not isinstance(expand_times, (list, tuple, Variable)): if not isinstance(expand_times, (list, tuple, Variable)):
...@@ -12138,18 +12151,17 @@ def sum(x): ...@@ -12138,18 +12151,17 @@ def sum(x):
@templatedoc() @templatedoc()
def slice(input, axes, starts, ends): def slice(input, axes, starts, ends):
""" """
Slice Operator. This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
Produces a slice of the input tensor along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses `axes`, `starts` and `ends` attributes to specify the start and Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes, it uses this information end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed for any of to slice the input data tensor. If a negative value is passed to
the start or end indices, it represents number of elements before the end ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
of that dimension. If the value passed to start or end is larger than axis :math:`i-1` (here 0 is the initial position).
the n (the number of elements in this dimension), it represents n. If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of axes must be equal to starts\' and ends\'. to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.
Following examples will explain how slice works: Following examples will explain how slice works:
.. code-block:: text .. code-block:: text
...@@ -12162,31 +12174,40 @@ def slice(input, axes, starts, ends): ...@@ -12162,31 +12174,40 @@ def slice(input, axes, starts, ends):
ends = [2, 3] ends = [2, 3]
Then: Then:
result = [ [5, 6, 7], ] result = [ [5, 6, 7], ]
Case2: Case2:
Given: Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1] axes = [0, 1]
starts = [0, 1] starts = [0, 1]
ends = [-1, 1000] ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0.
Then: Then:
result = [ [2, 3, 4], ] result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args: Args:
input (Variable): ${input_comment}. input (Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (List): ${axes_comment} axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
starts (List|Variable): ${starts_comment} It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
ends (List|Variable): ${ends_comment} starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor .
It represents ending indices of corresponding axis in ``axes``.
Returns: Returns:
out (Variable): ${out_comment} Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle.fluid as fluid
input = fluid.layers.data( input = fluid.data(
name="input", shape=[3, 4, 5, 6], dtype='float32') name="input", shape=[4, 5, 6], dtype='float32')
# example 1: # example 1:
# attr starts is a list which doesn't contain tensor Variable. # attr starts is a list which doesn't contain tensor Variable.
...@@ -12194,11 +12215,13 @@ def slice(input, axes, starts, ends): ...@@ -12194,11 +12215,13 @@ def slice(input, axes, starts, ends):
starts = [-3, 0, 2] starts = [-3, 0, 2]
ends = [3, 2, 4] ends = [3, 2, 4]
sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends) sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends)
# sliced_1 is input[0:3, 0:2, 2:4].
# example 2: # example 2:
# attr starts is a list which contain tensor Variable. # attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3) minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends) sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
# sliced_2 is input[0:3, 0:2, 2:4].
""" """
if not isinstance(starts, (list, tuple, Variable)): if not isinstance(starts, (list, tuple, Variable)):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册