Unverified commit 2ed8d556 authored by: W wangchaochaohu, committed by: GitHub

fix some en doc (#20281) (#20474)

* fix some en doc test=develop test=document_fix

* fix new fluid.data API.spec test=develop test=document_fix

* fix typo test=develop, test=document_fix

* fix typo test=develop test=document_fix

* fix typo test=develop test=document_fix

* fix test=develop test=document_fix

* fix according review test=develop test=document_fix

* fix review test=develop test=document_fix

* fix according review advice test=develop test=document_fix

* fix the doc  test=develop test=document_fix

* fix API.spec test=develop test=document_fix
Parent 44952ca6
......@@ -259,7 +259,7 @@ paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype
paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', '2aed0f546f220364fb1da724a3176f74'))
paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'f4b60847cb0f1ae00823ba6fb1b11310'))
paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2'))
paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '33b8dfd6708443ae93f1a0016ff6a5ef'))
paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '39534cccdb8e727e287316c7c42e6663'))
paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'a4492cf0393c6f70e4e25c681dcd73f4'))
paddle.fluid.layers.size (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'cf2e156beae36378722666c4c33bebfe'))
......@@ -314,7 +314,7 @@ paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], var
paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', 'd78a1c7344955c5caed8dc13adb7beb6'))
paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '2edf37d57862b24a7a26aa19a3573f73'))
paddle.fluid.layers.load (ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)), ('document', '309f9e5249463e1b207a7347b2a91134'))
paddle.fluid.layers.create_tensor (ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'aaf0176c743c43e9bc684dd7dfac25c5'))
paddle.fluid.layers.create_tensor (ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'fdc2d964488e99fb0743887454c34e36'))
paddle.fluid.layers.create_parameter (ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', '021272f30e0cdf7503586815378abfb8'))
paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', '47ea8b8c91879e50c9036e418b00ef4a'))
paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '1e44a534cf7d26ab230aa9f5e4e0525a'))
......@@ -322,8 +322,8 @@ paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name
paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b3f30feb5dec8f110d7393ffeb30dbd9'))
paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '5df743d578638cd2bbb9369499b44af4'))
paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', '8bd94aef4e123986d9a8c29f67b5532b'))
paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', '3551aa494e88d0f271e40cd45d6e3020'))
paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'd6b76c7d2c7129f8d713ca74f1c2c287'))
paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', '37a288e4400f6d5510e982827461c11b'))
paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '66e1e468666dd47e5b2715226cebeac0'))
paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '3dd54487232d05df4d70fba94b7d0b79'))
paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '7f47cc9aa7531b6bd37c5c96bc7f0469'))
paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '9792371e3b66258531225a5551de8961'))
......@@ -353,7 +353,7 @@ paddle.fluid.layers.less_than (ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], var
paddle.fluid.layers.less_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '04e5623dd39b4437b9b08e0ce11071ca'))
paddle.fluid.layers.greater_than (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '135352e24251238122bb7823dd4a49aa'))
paddle.fluid.layers.greater_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '44bdacd11299d72c0a52d2181e7ae6ca'))
paddle.fluid.layers.equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '788aa651e8b9fec79d16931ef3a33e90'))
paddle.fluid.layers.equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '781eac1f980916c68623659f639e2b8c'))
paddle.fluid.layers.not_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '8b76aaac4ba7cf9111750b9c2c9418cb'))
paddle.fluid.layers.array_read (ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None), ('document', 'caf0d94349cdc28e1bda3b8a19411ac0'))
paddle.fluid.layers.array_length (ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None), ('document', '6f24a9b872027634ad758ea2826c9727'))
......
......@@ -1181,20 +1181,27 @@ def equal(x, y, cond=None):
This layer returns the truth value of :math:`x == y` elementwise.
Args:
x(Variable): First operand of *equal*
y(Variable): Second operand of *equal*
cond(Variable|None): Optional output variable to store the result of *equal*
x(Variable): Tensor, data type is float32, float64, int32, int64.
y(Variable): Tensor, data type is float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of *equal*.
If cond is None, a new Variable will be created to store the result.
Returns:
Variable: The tensor variable storing the output of *equal*.
Variable: output Tensor, its shape is the same as that of the input Tensor,
and its data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.layers.data(name="label", shape=[3,10,32,32], dtype="float32")
limit = fluid.layers.data(name="limit", shape=[3,10,32,32], dtype="float32")
less = fluid.layers.equal(x=label, y=limit)
import numpy as np
out_cond = fluid.data(name="input1", shape=[2], dtype='bool')
label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
out1 = fluid.layers.equal(x=label, y=limit)  # out1=[True, False]
out2 = fluid.layers.equal(x=label_cond, y=limit, cond=out_cond)  # out2=[False, True] out_cond=[False, True]
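# A minimal execution sketch (not part of the original example): it assumes a CPU
# place is available and fetches the two results above so the values can be inspected.
# The local names `exe`, `res1` and `res2` are illustrative.
exe = fluid.Executor(fluid.CPUPlace())
res1, res2 = exe.run(fluid.default_main_program(), fetch_list=[out1, out2])
# res1 is expected to be [True, False] and res2 to be [False, True].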
"""
helper = LayerHelper("equal", **locals())
if cond is None:
......
......@@ -12530,18 +12530,19 @@ def slice(input, axes, starts, ends):
@templatedoc()
def strided_slice(input, axes, starts, ends, strides):
"""
Strided Slice OP
The conceptualization that really helped me understand this was
that this function emulates the indexing behavior of numpy arrays.
If you're familiar with numpy arrays, you'll know that you can make
slices via input[start1:end1:step1, start2:end2:step2, ... startN:endN:stepN].
Basically, a very succinct way of writing for loops to get certain elements of the array.
strided_slice just allows you to do this fancy indexing without the syntactic sugar.
The numpy (#input[start1:end1:step1, start2:end2:step2, ... startN:endN:stepN])
example from above just becomes fluid.strided_slice(input,[0, 1, ..., N],
[start1, start2, ..., startN], [end1, end2, ..., endN], [strides1, strides2, ..., stridesN]),
the axes which controls the dimension you want to slice makes it more flexible.
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
This OP uses the ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end positions for each axis in the list of axes, and uses this information
to slice the input data tensor. If a negative value such as :math:`-i` is passed to
``starts`` or ``ends``, it represents the :math:`i`-th position counting backwards from the
end of that axis (here 0 is the initial position). ``strides`` represents the step of
slicing; if ``strides`` is negative, the slice operation is performed in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it is treated as n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts`` , ``ends`` and ``strides``.
Following examples will explain how strided_slice works:
.. code-block:: text
......@@ -12551,7 +12552,7 @@ def strided_slice(input, axes, starts, ends, strides):
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides=[1, 1]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
......@@ -12560,25 +12561,48 @@ def strided_slice(input, axes, starts, ends, strides):
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [-1, 1000]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
input (Variable): ${input_comment}.
axes (List): ${axes_comment}
starts (List|Variable): ${starts_comment}
ends (List|Variable): ${ends_comment}
input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
It is optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``strides`` is a Variable, it should be a 1-D Tensor.
It represents slice step of corresponding axis in ``axes``.
Returns:
out (Variable): ${out_comment}
Variable: A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is the same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
TypeError: The type of ``strides`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
input = fluid.data(
name="input", shape=[3, 4, 5, 6], dtype='float32')
# example 1:
......@@ -12586,13 +12610,17 @@ def strided_slice(input, axes, starts, ends, strides):
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides=[1, 1, 1]
sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides)
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
# sliced_1 is input[-3:3:1, 0:2:1, 2:4:1].
# example 2:
# attr starts is a list which contains a tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides)
sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
# sliced_2 is input[-3:3:1, 0:2:1, 2:4:2].
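# A hedged run-through (not part of the original example): it assumes a CPU place
# and feeds random data for the "input" placeholder defined above, so the numpy
# analogy from the description can be checked directly. `arr`, `exe`, `res_1` and
# `res_2` are illustrative local names.
import numpy as np
exe = fluid.Executor(fluid.CPUPlace())
arr = np.random.random([3, 4, 5, 6]).astype('float32')
res_1, res_2 = exe.run(fluid.default_main_program(),
                       feed={"input": arr},
                       fetch_list=[sliced_1, sliced_2])
# res_1 should match arr[-3:3:1, 0:2:1, 2:4:1] and res_2 should match arr[-3:3:1, 0:2:1, 2:4:2].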
"""
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
......
......@@ -35,17 +35,18 @@ __all__ = [
def create_tensor(dtype, name=None, persistable=False):
"""
Create an variable, which will hold a LoDTensor with data type dtype.
Create a variable, which will hold a Tensor with data type dtype.
Args:
dtype(string): 'float32'|'int32'|..., the data type of the
created tensor.
name(string): The name of the created tensor, if not set,
the name will be a random unique one.
dtype(string|numpy.dtype): the data type of the Tensor to be created, which
can be bool, float16, float32, float64, int8, int16, int32 or int64.
name(string, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
persistable(bool): Set the persistable flag of the created tensor.
The default value is False.
Returns:
Variable: The tensor variable storing the created tensor.
Variable: The tensor to be created according to dtype.
Examples:
.. code-block:: python
......@@ -374,28 +375,35 @@ def assign(input, output=None):
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
**fill_constant**
This function creates a tensor with specified `shape` and `dtype`, and
This OP creates a Tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
The attribute `stop_gradient` of the created tensor is set to True.
The attribute `stop_gradient` of the created Tensor is set to True.
Args:
shape(tuple|list|None): Shape of the output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor.
value(float): The constant value used to initialize the output tensor.
out(Variable): The output tensor.
force_cpu(True|False): data should be on CPU if set true.
shape(tuple|list): Shape of the Tensor to be created.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
be float16, float32, float64, int32, int64.
value(float): The constant value used to initialize the Tensor to be created.
force_cpu(bool, optional): data should be on CPU if it is True; the default value is False.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of the operation.
If out is None, a new Variable will be created to store the result.
Returns:
Variable: The tensor variable storing the output.
Variable: Tensor which is created according to shape and dtype.
Raises:
TypeError: The dtype must be one of bool, float16, float32, float64, int32 or int64,
and the data type of the out Tensor must be the same as the dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
data1 = fluid.layers.fill_constant(shape=[2, 1], value=0, dtype='int64')  # data1=[[0], [0]]
data2 = fluid.layers.fill_constant(shape=[2, 1], value=5, dtype='int64', out=data1)
# data1=[[5], [5]] data2=[[5], [5]]
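# A minimal execution sketch (not part of the original example): it assumes a CPU
# place. Because data1 was passed through `out`, data1 and data2 refer to the same
# Variable after the second call; `exe`, `res1` and `res2` are illustrative names.
exe = fluid.Executor(fluid.CPUPlace())
res1, res2 = exe.run(fluid.default_main_program(), fetch_list=[data1, data2])
# Both res1 and res2 are expected to be [[5], [5]].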
"""
helper = LayerHelper("fill_constant", **locals())
......@@ -437,34 +445,36 @@ def fill_constant_batch_size_like(input,
input_dim_idx=0,
output_dim_idx=0):
"""
${comment}
It also sets *stop_gradient* to True.
This OP creates a Tensor according to the shape and dtype, and initializes the
Tensor with the constant provided in ``value``. When the input is a LoDTensor
and input_dim_idx is 0, the output_dim_idx dimension of the output is set to the
batch_size value of the input. The stop_gradient attribute of the created
Tensor is False by default.
Args:
input(${input_type}): ${input_comment}.
shape(${shape_type}): ${shape_comment}.
dtype(${dtype_type}): ${dtype_comment}.
value(${value_type}): ${value_comment}.
input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.
output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.
input(Variable): Tensor whose data type is float32, float64, int32 or int64.
shape(list): The shape of the Tensor to be created. The Tensor's shape may be changed
according to the input.
dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
can be float32, float64, int32, int64.
value(float|int): The constant value used to initialize the Tensor to be created.
input_dim_idx(int): When the value is 0 and the input is a LoDTensor, the output_dim_idx
dimension of the created Tensor is set to the batch_size value of the input.
The default value is 0.
output_dim_idx(int): Used to specify which dimension of the created Tensor is set to
the batch_size value of the input Tensor. The default value is 0.
Returns:
${out_comment}.
Variable: Tensor which will be created according to dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
like = fluid.layers.data(name='like', shape=[1], dtype='float32')
like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
data = fluid.layers.fill_constant_batch_size_like(
input=like, shape=[1], value=0, dtype='int64')
input=like, shape=[1], value=0, dtype='int64')  # like=[[10, 10]] data=[0]
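# A hedged sketch of the batch-size behaviour (not part of the original example):
# the placeholder name "x" and its shape are illustrative assumptions. The 0th
# dimension of the result follows the batch size of the tensor fed for "x",
# not the 1 given in `shape`.
x = fluid.data(name="x", shape=[None, 3], dtype='float32')
zeros = fluid.layers.fill_constant_batch_size_like(
    input=x, shape=[1, 3], value=0.0, dtype='float32')
# If a tensor with shape [4, 3] is fed for "x", `zeros` will have shape [4, 3].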
"""
helper = LayerHelper("fill_constant_batch_size_like", **locals())
......