From 71fb3bfd097011968ff4f9c34d3ef68f13252213 Mon Sep 17 00:00:00 2001
From: liym27 <33742067+liym27@users.noreply.github.com>
Date: Thu, 10 Oct 2019 16:19:25 +0800
Subject: [PATCH] [cherry-pick]fix English document of expand/pow/reshape/slice.
 test=release/1.6, test=document_fix (#20280) (#20356)

---
 paddle/fluid/API.spec            |   8 +-
 python/paddle/fluid/layers/nn.py | 159 ++++++++++++++++++-------
 2 files changed, 95 insertions(+), 72 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 886047920a..d99332bc47 100755
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -194,7 +194,7 @@ paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label',
 paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'ecb75c1b00c4c76c98b482f633b7a10c'))
 paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29'))
 paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '98e7927f09ee2270535b29f048e481ec'))
-paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'ca73fdc4551c5765c92eb00f24874289'))
+paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'd7a6d59e464a7ef1184eb6caefeb49f1'))
 paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbac07662a6e22e8e299ced880c7775'))
 paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b9bd3129d36a70e7c4385df51ff71c62'))
 paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea'))
@@ -228,7 +228,7 @@ paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'],
 paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6fc86ed23b420c8a0f6c043563cf3937'))
 paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
 paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
-paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ca34f88ff61cf2a7f4c97a493d6000d0'))
+paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '00d437d1e0d9450ea75a0495b93b54a7'))
 paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d'))
 paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
 paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
@@ -244,7 +244,7 @@ paddle.fluid.layers.unstack (ArgSpec(args=['x', 'axis', 'num'], varargs=None, ke
 paddle.fluid.layers.sequence_enumerate (ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b870fed41abd2aecf929ece65f555fa1'))
 paddle.fluid.layers.unique (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', 'cab0b06e5683875f12f0efc62fa230a9'))
 paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '1cb59c65b41766116944b8ed1e6ad345'))
-paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73'))
+paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e93a1b102ab64b247c1b774e60d4c0d0'))
 paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f47f9d207ac60b6f294087bcb1b64ae8'))
 paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
 paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '0c9c260e7738165a099f6a76da0b7814'))
@@ -261,7 +261,7 @@ paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed
 paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'c39b647b6cf08e058d96ee503d5284fe'))
 paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', 'b24d0b21361c4bb8ef2cec8c26fb12b2'))
 paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'f4b60847cb0f1ae00823ba6fb1b11310'))
-paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '315b4870f294e33a27ecbdf440bed3ff'))
+paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
 paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2'))
 paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'bf61c8f79d795a8371bdb3b5468aa82b'))
 paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '096df0e0273145ab80ed119a4c294db3'))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 9b9b4fe766..20f97ce5ad 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8067,12 +8067,12 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
 def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
     """
-    Gives a new shape to the input Tensor without changing its data.
+    This operator changes the shape of ``x`` without changing its data.
 
-    The target shape can be given by :attr:`shape` or :attr:`actual_shape`.
-    :attr:`shape` is a list of integer or tensor variable while :attr:`actual_shape` is a tensor
-    variable. :attr:`actual_shape` has a higher priority than :attr:`shape`
-    if it is provided and it only contains integer, while :attr:`shape` still should be set correctly to
+    The target shape can be given by ``shape`` or ``actual_shape``.
+    When ``shape`` and ``actual_shape`` are set at the same time,
+    ``actual_shape`` has a higher priority than ``shape``,
+    but in that case ``shape`` can only be a list or tuple of integers, and ``shape`` should still be set correctly to
     gurantee shape inference in compile-time.
 
     Some tricks exist when specifying the target shape.
 
@@ -8083,7 +8083,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
 
     2. 0 means the actual dimension value is going to be copied from the
     corresponding dimension of x. The indice of 0s in shape can not exceed
-    Rank(X).
+    the dimension of x.
 
     Here are some examples to explain it.
 
@@ -8104,38 +8104,36 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
     besides -1, 0 means the actual dimension value is going to be copied from
     the corresponding dimension of x.
 
-    **Warning:** the parameter :attr:`actual_shape` will be deprecated in the future and only use :attr:`shape` instead.
+    **Note**:
+        The parameter ``actual_shape`` will be deprecated in the future; use ``shape`` instead to specify the target shape.
 
     Args:
-        x(variable): The input tensor.
-        shape(list|tuple|Variable): The new shape. At most one dimension of the new shape can
-            be -1. If :attr:`shape` is a list or tuple, it can contain Variable or not and
-            the shape of Variable must be [1].
-
-        actual_shape(variable): An optional input. If provided, reshape
-                                according to this given shape rather than
-                                :attr:`shape` specifying shape. That is to
-                                say :attr:`actual_shape` has a higher priority
-                                than :attr:`shape(list|tuple)` but not :attr:`shape(Variable)`. \
-                                This argument :attr:`actual_shape` will be removed in a future version. \
-                                Instructions for updating: :attr:`actual_shape` is deprecated,
-                                only use :attr:`shape` instead.
-        act (str): The non-linear activation to be applied to the reshaped tensor
-                   variable.
-        inplace(bool): If ``inplace`` is `True`, the input and output of ``layers.reshape``
-                       are the same variable, otherwise, the input and output of
-                       ``layers.reshape`` are different variables. Note that if :attr:`x`
-                       is more than one layer's input, ``inplace`` must be :attr:`False`.
-        name (str): The name of this layer. It is optional.
+        x(Variable): A ``Tensor`` or ``LoDTensor``. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
+        shape(list|tuple|Variable): Defines the target shape. At most one dimension of the target shape can be -1.
+            The data type is ``int32``. If ``shape`` is a list or tuple, its elements should be integers or Tensors with shape [1].
+            If ``shape`` is a Variable, it should be a 1-D Tensor.
+        actual_shape(Variable, optional): A 1-D ``Tensor`` or ``LoDTensor``. The data type is ``int32``. If provided, the input is reshaped
+            according to this given shape rather than the shape specified by ``shape``.
+            That is to say, ``actual_shape`` has a higher priority
+            than ``shape(list|tuple)`` but not ``shape(Variable)``. \
+            This argument ``actual_shape`` will be removed in a future version. \
+            Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
+        act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
+        inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
+            are the same variable. Otherwise, the input and output of
+            ``layers.reshape`` are different variables. Default False. Note that if ``x``
+            is the input of more than one OP, ``inplace`` must be False.
+        name(str, optional): The default value is None. Normally there is no need for users to set this property.
+            For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        Variable: The reshaped tensor variable if :attr:`act` is None. It is a \
-                  new tensor variable if :attr:`inplace` is :attr:`False`, \
-                  otherwise it is :attr:`x`. If :attr:`act` is not None, return \
-                  the activated tensor variable.
+        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as that of ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, the reshaped tensor variable is returned, otherwise the activated tensor variable is returned.
 
     Raises:
-        TypeError: if actual_shape is neither Variable nor None.
+        TypeError: If actual_shape is neither Variable nor None.
+        ValueError: If more than one element of ``shape`` is -1.
+        ValueError: If an element of ``shape`` is 0 and its index exceeds the number of dimensions of ``x``.
+        ValueError: If any element of ``shape`` other than -1 is negative.
 
     Examples:
         .. code-block:: python
 
@@ -8144,16 +8142,18 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
 
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            data_1 = fluid.layers.data(
-                name='data_1', shape=[2, 4, 6], dtype='float32')
+            data_1 = fluid.data(
+                name='data_1', shape=[2, 4, 6], dtype='float32')
             reshaped_1 = fluid.layers.reshape(
-                x=data_1, shape=[-1, 0, 3, 2], inplace=True)
+                x=data_1, shape=[-1, 0, 3, 2], inplace=True)
+            # the shape of reshaped_1 is [2,4,3,2].
 
             # example 2:
             # attr shape is a list which contains tensor Variable.
             data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
             dim = fluid.layers.fill_constant([1], "int32", 5)
             reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
+            # the shape of reshaped_2 is [5,10].
     """
 
     if not isinstance(shape, (list, tuple, Variable)):
@@ -11042,15 +11042,17 @@ def relu6(x, threshold=6.0, name=None):
 
 @templatedoc()
 def pow(x, factor=1.0, name=None):
     """
-    ${comment}
+    This is the Pow activation operator.
+
+    :math:`out = x^{factor}`
+
     Args:
-        x(${x_type}): ${x_comment}
-        factor(float|Variable|1.0): The exponential factor of Pow.
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        x(Variable): A ``Tensor`` or ``LoDTensor``. The data type is ``float32`` or ``float64``.
+        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
+        name(str, optional): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        output(${out_type}): ${out_comment}
+        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as that of ``x``.
 
     Examples:
 
         .. code-block:: python
 
             import paddle.fluid as fluid
 
-            x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
+            x = fluid.data(name="x", shape=[32,32], dtype="float32")
 
             # example 1: argument factor is float
             y_1 = fluid.layers.pow(x, factor=2.0)
+            # y_1 is x^{2.0}
 
             # example 2: argument factor is Variable
             factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
             y_2 = fluid.layers.pow(x, factor=factor_tensor)
+            # y_2 is x^{3.0}
     """
     helper = LayerHelper('pow', **locals())
     inputs = {'X': x}
@@ -11749,9 +11753,10 @@ def unstack(x, axis=0, num=None):
 
 
 def expand(x, expand_times, name=None):
-    """Expand operator tiles the input by given times number. You should set times
-    number for each dimension by providing attribute 'expand_times'. The rank of X
-    should be in [1, 6]. Please note that size of 'expand_times' must be the same
+    """
+    This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
+    The number of repetitions for each dimension of ``x`` is set by the parameter ``expand_times``.
+    The rank of ``x`` should be less than or equal to 6. Please note that the size of ``expand_times`` must be the same
     with X's rank. Following is a using case:
 
@@ -11774,12 +11779,18 @@ def expand(x, expand_times, name=None):
             ]
 
     Args:
-        x (Variable): A tensor with rank in [1, 6].
-        expand_times (list|tuple|Variable): Expand times number for each dimension.
+        x (Variable): A ``Tensor`` or ``LoDTensor`` with rank in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32``.
+        expand_times (list|tuple|Variable): The data type is ``int32``. If ``expand_times`` is a list or tuple, its elements
+            should be integers or Tensors with shape [1]. If ``expand_times`` is a Variable, it should be a 1-D Tensor.
+            It gives the number of times to tile each dimension of ``x``.
+        name (str, optional): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        Variable: The expanded variable which is a LoDTensor. After expanding, size of each dimension of Output(Out) is equal to ithe size of the corresponding dimension of Input(X) multiplying the corresponding value given by expand_times.
+        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as that of ``x``. After expanding, the size of each dimension of the output is equal to the size of the corresponding dimension of ``x`` multiplied by the corresponding value given by ``expand_times``.
 
+    Raises:
+        TypeError: If the type of ``expand_times`` is not list, tuple or Variable.
+        ValueError: If any element of ``expand_times`` is negative.
 
     Examples:
         .. code-block:: python
 
@@ -11789,11 +11800,13 @@ def expand(x, expand_times, name=None):
 
         # example 1:
         data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
         expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
+        # the shape of expanded_1 is [2, 6, 2].
 
         # example 2:
         data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
         expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
         expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
+        # the shape of expanded_2 is [48, 56].
     """
 
     if not isinstance(expand_times, (list, tuple, Variable)):
@@ -12084,18 +12097,17 @@ def sum(x):
 
 @templatedoc()
 def slice(input, axes, starts, ends):
     """
-    Slice Operator.
-
-    Produces a slice of the input tensor along multiple axes. Similar to numpy:
+    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
     https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
-    Slice uses `axes`, `starts` and `ends` attributes to specify the start and
-    end dimension for each axis in the list of axes, it uses this information
-    to slice the input data tensor. If a negative value is passed for any of
-    the start or end indices, it represents number of elements before the end
-    of that dimension. If the value passed to start or end is larger than
-    the n (the number of elements in this dimension), it represents n.
+    Slice uses the ``axes``, ``starts`` and ``ends`` attributes to specify the start and
+    end indices for each axis in the list of axes, and uses this information
+    to slice the input data tensor. If a negative value such as :math:`-i` is passed to
+    ``starts`` or ``ends``, it represents the position counted backwards from the end of
+    that axis, i.e. the index :math:`n - i`, where :math:`n` is the size of the axis (0 is the initial position).
+    If the value passed to ``starts`` or ``ends`` is greater than n
+    (the number of elements in this dimension), it represents n.
     For slicing to the end of a dimension with unknown size, it is recommended
-    to pass in INT_MAX. The size of axes must be equal to starts\' and ends\'.
+    to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts`` and ``ends``.
     Following examples will explain how slice works:
 
     .. code-block:: text
 
         Case1:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [1, 0]
                ends = [2, 3]
            Then:
                result = [ [5, 6, 7], ]
-
+
         Case2:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
-               ends = [-1, 1000]
+               ends = [-1, 1000]       # -1 here is equivalent to index 1 (the last position of dimension 0).
            Then:
-               result = [ [2, 3, 4], ]
+               result = [ [2, 3, 4], ]     # result = data[0:1, 1:4]
 
     Args:
-        input (Variable): ${input_comment}.
-        axes (List): ${axes_comment}
-        starts (List|Variable): ${starts_comment}
-        ends (List|Variable): ${ends_comment}
+        input (Variable): A ``Tensor`` or ``LoDTensor``. The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
+        axes (list|tuple): The data type is ``int32``. Axes that ``starts`` and ``ends`` apply to.
+            It is optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
+        starts (list|tuple|Variable): The data type is ``int32``. If ``starts`` is a list or tuple, its elements
+            should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
+            It represents the starting indices of the corresponding axes in ``axes``.
+        ends (list|tuple|Variable): The data type is ``int32``. If ``ends`` is a list or tuple, its elements
+            should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
+            It represents the ending indices of the corresponding axes in ``axes``.
 
     Returns:
-        out (Variable): ${out_comment}
+        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as that of ``input``.
+
+    Raises:
+        TypeError: If the type of ``starts`` is not list, tuple or Variable.
+        TypeError: If the type of ``ends`` is not list, tuple or Variable.
 
     Examples:
         .. code-block:: python
 
             import paddle.fluid as fluid
 
-            input = fluid.layers.data(
-                name="input", shape=[3, 4, 5, 6], dtype='float32')
+            input = fluid.data(
+                name="input", shape=[4, 5, 6], dtype='float32')
 
             # example 1:
             # attr starts is a list which doesn't contain tensor Variable.
@@ -12140,11 +12161,13 @@ def slice(input, axes, starts, ends):
             starts = [-3, 0, 2]
             ends = [3, 2, 4]
             sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends)
+            # sliced_1 is input[1:3, 0:2, 2:4], since -3 maps to index 1 on an axis of size 4.
 
             # example 2:
             # attr starts is a list which contain tensor Variable.
             minus_3 = fluid.layers.fill_constant([1], "int32", -3)
             sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
+            # sliced_2 is input[1:3, 0:2, 2:4].
     """
 
     if not isinstance(starts, (list, tuple, Variable)):
-- 
GitLab
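
The following sketch strings together the four layers touched by this patch so the documented shape behaviour can be checked by hand. It is illustrative only and is not part of the change: it assumes the release/1.6 fluid API used in the docstrings above, and the fill_constant inputs, executor boilerplate and printed shapes are additions made for this note.

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # Constant input so no feed dict is needed.
    data = fluid.layers.fill_constant(shape=[2, 4, 6], dtype='float32', value=1.0)

    # reshape: -1 is inferred from the remaining elements and 0 copies the
    # corresponding input dimension, so [2, 4, 6] -> [2, 4, 3, 2].
    reshaped = fluid.layers.reshape(data, shape=[-1, 0, 3, 2])

    # pow: factor can be a Python float or a float32 Tensor with shape [1].
    factor = fluid.layers.fill_constant(shape=[1], dtype='float32', value=3.0)
    cubed = fluid.layers.pow(data, factor=factor)   # elementwise x^3

    # expand: tile each dimension by the matching entry of expand_times,
    # so [2, 4, 6] -> [2, 8, 12].
    expanded = fluid.layers.expand(data, expand_times=[1, 2, 2])

    # slice: a negative start such as -3 counts back from the end of that
    # axis, so axis 1 is sliced as 1:4 and the result has shape [2, 3, 4].
    sliced = fluid.layers.slice(data, axes=[0, 1, 2],
                                starts=[0, -3, 2], ends=[2, 4, 6])

    exe = fluid.Executor(fluid.CPUPlace())
    outs = exe.run(fluid.default_main_program(),
                   fetch_list=[reshaped, cubed, expanded, sliced])
    for name, out in zip(['reshape', 'pow', 'expand', 'slice'], outs):
        print(name, np.asarray(out).shape)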