Unverified commit e34fccbc authored by liu zhengxi, committed by GitHub

update the api en doc (#20307)

* update the api en_doc of has_inf, has_nan, cumsum, double_buffer and mul, test=document_fix

* update the API.spec for apis, test=document_fix
Parent e3e22146
......@@ -270,7 +270,7 @@ paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None
paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4ad0d96a149f023cb72199ded4ce6e9d'))
paddle.fluid.layers.clip_by_norm (ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a5f4917fda557ceb834168cdbec6d51b'))
paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '597257fb94d0597c404a6a5c91ab5258'))
paddle.fluid.layers.mul (ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', '784b7e36cea88493f9e37a41b10fbf4d'))
paddle.fluid.layers.mul (ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', 'a91eb670033cd103cd8b24624fef5f69'))
paddle.fluid.layers.sigmoid_cross_entropy_with_logits (ArgSpec(args=['x', 'label', 'ignore_index', 'name', 'normalize'], varargs=None, keywords=None, defaults=(-100, None, False)), ('document', '7637c974f2d749d359acae9062c4d96f'))
paddle.fluid.layers.maxout (ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '406eee439e41988c8a0304186626a0dd'))
paddle.fluid.layers.space_to_depth (ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '26decdea9376b6b9a0d3432d82ca207b'))
......@@ -310,7 +310,7 @@ paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, key
paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '126ede8ce0e751244b1b54cd359c89d7'))
paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545'))
paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '88367daf9a30c9ab83adc5d7221e23ef'))
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '44724c493f41a124abc7531c2740e2e3'))
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '556fa82daf62cbb0fb393f4125daba77'))
paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', 'd78a1c7344955c5caed8dc13adb7beb6'))
paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '2edf37d57862b24a7a26aa19a3573f73'))
paddle.fluid.layers.load (ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)), ('document', '309f9e5249463e1b207a7347b2a91134'))
......@@ -330,8 +330,8 @@ paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=Non
paddle.fluid.layers.ones (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', '812c623ed52610b9773f9fc05413bc34'))
paddle.fluid.layers.zeros (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', '95379f9288c2d05356ec0e2375c6bc57'))
paddle.fluid.layers.reverse (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None), ('document', '628135603692137d52bcf5a8d8d6816d'))
paddle.fluid.layers.has_inf (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '51a0fa1cfaf2507c00a215adacdb8a63'))
paddle.fluid.layers.has_nan (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '129cf426e71452fe8276d616a6dc21ae'))
paddle.fluid.layers.has_inf (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'aca8a35516cef98af836fb6a64ac8acb'))
paddle.fluid.layers.has_nan (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '99f4cf36db08a4e23c8c3857e2af1316'))
paddle.fluid.layers.isfinite (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'b9fff4ffc8d11934cde099f4c39bf841'))
paddle.fluid.layers.range (ArgSpec(args=['start', 'end', 'step', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '3e982b788b95f959eafeeb0696a3cbde'))
paddle.fluid.layers.linspace (ArgSpec(args=['start', 'stop', 'num', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '156e653497804566a43f6a53d48b08c4'))
......@@ -404,7 +404,7 @@ paddle.fluid.layers.softplus (ArgSpec(args=['x', 'name'], varargs=None, keywords
paddle.fluid.layers.softsign (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e52b23bc455c708d7a26501db4ab8971'))
paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'alpha'], varargs=None, keywords=None, defaults=(None,)), ('document', '958c7bfdfb0b5e92af6ca4a90d24e5ef'))
paddle.fluid.layers.hard_shrink (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '386a4103d2884b2f1312ebc1e8ee6486'))
paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '5ab9d5721a6734fe127069e4314e1309'))
paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c1f2e4c4511da09d5d89c556ea802bd1'))
paddle.fluid.layers.thresholded_relu (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '94c71025bf11ab8172fd455350274138'))
paddle.fluid.layers.prior_box (ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '0fdf82762fd0a5acb2578a72771b5b44'))
paddle.fluid.layers.density_prior_box (ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', '7a484a0da5e993a7734867a3dfa86571'))
......
......@@ -790,19 +790,16 @@ def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
def double_buffer(reader, place=None, name=None):
"""
Wrap a double buffer reader. The data will copy to target place with a
double buffer queue. If the target place is None, the place that executor
perform on will be used.
Wrap a double buffer reader. The class Reader contains DecoratedReader and FileReader. Moreover, DecoratedReader is inherited by CustomReader and BufferedReader; this function is related to BufferedReader. The data will be copied to the target place with a double buffer queue. If the target place is None, the place where the executor runs will be used.
Args:
reader(Variable): the reader variable need to be wrapped.
place(Place): the place of target data. Default is the sample place of
executor perform.
name(str): Variable name. None if the user does not care.
Args:
reader (Variable): The Reader Variable that needs to be wrapped.
place (Place, optional): The place of the target data, such as CPU or GPU; when a GPU is used, it is necessary to specify which card is involved. Default is the same place where the executor runs.
name (str, optional): Variable name. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
wrapped reader with double buffer.
Variable(Reader): wrapped reader with double buffer.
Examples:
.. code-block:: python
......
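The Examples block of this hunk is collapsed above; a minimal usage sketch of wrapping a py_reader with double_buffer (not taken from the diff, with illustrative reader shapes) could look like:

.. code-block:: python

    import paddle.fluid as fluid

    # build a reader first (shapes/dtypes here are illustrative assumptions)
    reader = fluid.layers.py_reader(capacity=64,
                                    shapes=[(-1, 1, 28, 28), (-1, 1)],
                                    dtypes=['float32', 'int64'],
                                    use_double_buffer=False)
    # wrap it with a double buffer; place=None falls back to the executor's place
    reader = fluid.layers.double_buffer(reader)
    image, label = fluid.layers.read_file(reader)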
......@@ -13749,20 +13749,26 @@ def merge_selected_rows(x, name=None):
return out
@templatedoc()
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
"""
${comment}
Mul Operator.
This operator is used to perform matrix multiplication for the inputs $x$ and $y$.
The equation is:
.. math::
Out = x * y
Both the inputs $x$ and $y$ can carry the LoD (Level of Details) information or not, but the output only shares the LoD information with the input $x$.
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
x_num_col_dims(${x_num_col_dims_type}): ${x_num_col_dims_comment}
y_num_col_dims(${y_num_col_dims_type}): ${y_num_col_dims_comment}
name(basestring|None): Name of the output.
x (Variable): The first input Tensor/LoDTensor of mul_op.
y (Variable): The second input Tensor/LoDTensor of mul_op.
x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional matrix first. The flattening rule is: the first `x_num_col_dims` dimensions will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - x_num_col_dims` dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, the height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions' sizes, and the width of the flattened matrix is equal to the product of $x$'s last `rank(x) - x_num_col_dims` dimensions' sizes. For example, suppose $x$ is a 5-dimensional tensor with the shape [2, 3, 4, 5, 6] and `x_num_col_dims` = 3. Then the flattened matrix will have the shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See the comments on `x_num_col_dims` for more details. Default is 1.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
out(${out_type}): ${out_comment}
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
Examples:
.. code-block:: python
......
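Since the Examples block of the mul hunk is also collapsed, the flattening rule described for `x_num_col_dims` can be illustrated with a short sketch (not part of the diff; the shapes are illustrative assumptions):

.. code-block:: python

    import paddle.fluid as fluid

    # x has shape [2, 3, 4]; with x_num_col_dims=2 it is flattened to [2 x 3, 4] = [6, 4]
    x = fluid.layers.data(name="x", shape=[2, 3, 4], dtype="float32", append_batch_size=False)
    # y has shape [4, 5]; with y_num_col_dims=1 it is used as a [4, 5] matrix
    y = fluid.layers.data(name="y", shape=[4, 5], dtype="float32", append_batch_size=False)
    out = fluid.layers.mul(x, y, x_num_col_dims=2, y_num_col_dims=1)
    # the [6, 4] x [4, 5] product is reshaped back, so out is expected to have shape [2, 3, 5]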
......@@ -137,12 +137,24 @@ def cumsum(x, axis=None, exclusive=None, reverse=None):
return _cum_sum_(**kwargs)
cumsum.__doc__ = _cum_sum_.__doc__ + """
cumsum.__doc__ = """
The cumulative sum of the elements along a given axis. By default, the first element of the result is the same as the first element of the input. If `exclusive` is True, the first element of the result is 0.
Args:
x (Variable): Input of the cumsum operator, the Tensor/LoDTensor to be accumulated.
axis (int, optional): The dimension to accumulate along. -1 means the last dimension. Default is -1.
exclusive (bool, optional): Whether to perform exclusive cumsum. Default is False.
reverse (bool, optional): If True, the cumsum is performed in the reversed direction. Default is False.
Returns:
Variable(Tensor/LoDTensor): The result of the cumsum operator.
Examples:
.. code-block:: python
>>> import paddle.fluid as fluid
>>> data = fluid.layers.data(name="input", shape=[32, 784])
>>> result = fluid.layers.cumsum(data, axis=0)
import paddle.fluid as fluid
data = fluid.layers.data(name="input", shape=[32, 784])
result = fluid.layers.cumsum(data, axis=0)
"""
__all__ += ['thresholded_relu']
......
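The new cumsum example above only shows the default behaviour; a sketch of the `exclusive` flag described in the docstring (the executor setup and feed values are illustrative assumptions) might be:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    data = fluid.layers.data(name="data", shape=[4], dtype="float32", append_batch_size=False)
    # exclusive=True shifts the sums by one position, so the first output element is 0
    result = fluid.layers.cumsum(data, axis=0, exclusive=True)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"data": np.array([1., 2., 3., 4.], dtype="float32")},
                   fetch_list=[result])
    # out is expected to be [0., 1., 3., 6.]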
......@@ -773,10 +773,10 @@ def has_inf(x):
Test if any of x contains an infinity number
Args:
x(variable): The Tensor/LoDTensor to be checked.
x (Variable): The Tensor/LoDTensor to be checked.
Returns:
Variable: The tensor variable storing the output, only a bool value.
Variable: The tensor variable storing the output, only a bool value, indicating whether there is an infinity number in x or not.
Examples:
.. code-block:: python
......@@ -797,10 +797,10 @@ def has_nan(x):
Test if any of x contains a NAN
Args:
x(variable): The Tensor/LoDTensor to be checked.
x (Variable): The Tensor/LoDTensor to be checked.
Returns:
Variable: The tensor variable storing the output, only a bool value.
Variable: The tensor variable storing the output, only a bool value, indicating whether there is a NAN in x or not.
Examples:
.. code-block:: python
......
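The Examples blocks for has_inf and has_nan are collapsed above; a minimal sketch of both checks (the feed values and executor setup are illustrative assumptions) could be:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    data = fluid.layers.data(name="input", shape=[3], dtype="float32", append_batch_size=False)
    res_inf = fluid.layers.has_inf(data)
    res_nan = fluid.layers.has_nan(data)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    inf_flag, nan_flag = exe.run(
        feed={"input": np.array([1.0, np.inf, 2.0], dtype="float32")},
        fetch_list=[res_inf, res_nan])
    # inf_flag is expected to be [True] and nan_flag [False]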