Commit 439bfe8c authored by silingtong123, committed by liuwei1031

fix doc, updates API documents (#20554)

* fix doc,  updates API documents of uniform_random and  uniform_random_batch_size_like (#20316)

*  fix doc, updates document of sequence_softmax, sequence_scatter, sequence_unpad (#20269)
Parent f978fbce
......@@ -140,7 +140,7 @@ paddle.fluid.layers.sequence_conv (ArgSpec(args=['input', 'num_filters', 'filter
paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCHW')), ('document', 'e91c63b8ac8c35982c0ac518537e44bf'))
paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCDHW')), ('document', 'feff9c8ebb4d4d0be5345f9042f57c8e'))
paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test', 'pad_value'], varargs=None, keywords=None, defaults=(False, 0.0)), ('document', '5a709f7ef3fdb8fc819d09dc4fbada9a'))
paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'eaa9d0bbd3d4e017c8bc4ecdac483711'))
paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '423d8e89236962a1fe7358548b993c64'))
paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', 'f7d6a5173c92c23f9a25cbc58a0eb577'))
paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', 'daf9ae55b2d54bd5f35acb397fd1e1b5'))
paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'df8edcb8dd020fdddf778c9f613dc650'))
......@@ -155,7 +155,7 @@ paddle.fluid.layers.conv3d_transpose (ArgSpec(args=['input', 'num_filters', 'out
paddle.fluid.layers.sequence_expand (ArgSpec(args=['x', 'y', 'ref_level', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '10e122eb755c2bd1f78ef2332b28f1a0'))
paddle.fluid.layers.sequence_expand_as (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '858c432e7cbd8bb952cc2eb555457d50'))
paddle.fluid.layers.sequence_pad (ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'df08b9c499ab3a90f95d08ab5b6c6c62'))
paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e478180d5bc010a84f35af958cafa62c'))
paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'af40ffc8474eaec0112c8fb55a88439a'))
paddle.fluid.layers.lstm_unit (ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None)), ('document', 'f5a878b6166f34878376a58d7e6fa95c'))
paddle.fluid.layers.reduce_sum (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'ecb55075fdf89a866bcede85e60aebad'))
paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '968c9b17affaf714e5021c3dc8d68c73'))
......@@ -213,7 +213,7 @@ paddle.fluid.layers.gather_nd (ArgSpec(args=['input', 'index', 'name'], varargs=
paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name', 'overwrite'], varargs=None, keywords=None, defaults=(None, True)), ('document', '3f94c3348dc79b7b40a839d31a3eaa84'))
paddle.fluid.layers.scatter_nd_add (ArgSpec(args=['ref', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2607b5c9369fbc52f208de066a80fc25'))
paddle.fluid.layers.scatter_nd (ArgSpec(args=['index', 'updates', 'shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e43f1d3a938b35da246aea3e72a020ec'))
paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'abe3f714120117a5a3d3e639853932bf'))
paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '8e3fb174970251a1840c58ebb12b795f'))
paddle.fluid.layers.random_crop (ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)), ('document', '44f35002962cf24e14dd2958f6584e3d'))
paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', 'dea29c0c3cdbd5b498afef60e58c9d7c'))
paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c46011e3d848bc3dc650772b25fbbf10'))
......@@ -253,7 +253,7 @@ paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'nam
paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '6fc5d7492830d60c7fa61b3bc8f0d7e7'))
paddle.fluid.layers.elementwise_mod (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7bc28b6e147067a220f8e00b9055f62e'))
paddle.fluid.layers.elementwise_floordiv (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '0035541f94bddb5ad472f9788384ca6a'))
paddle.fluid.layers.uniform_random_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', 'cfa120e583cd4a5bfa120c8a26f98a28'))
paddle.fluid.layers.uniform_random_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', '571c963b9b49f1a323d2ea2343f10dd2'))
paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'dd4ddb66c78a2564e5d1e0e345d8286f'))
paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '9ac9bdc45be94494d8543b8cec5c26e0'))
paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', '2aed0f546f220364fb1da724a3176f74'))
......@@ -307,7 +307,7 @@ paddle.fluid.layers.filter_by_instag (ArgSpec(args=['ins', 'ins_tag', 'filter_ta
paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards', 'shard_id', 'ignore_value'], varargs=None, keywords=None, defaults=(-1,)), ('document', '3c6b30e9cd57b38d4a5fa1ade887f779'))
paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', 'bd763b9ca99239d624c3cb4626e3627a'))
paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', '88b967ef5132567396062d5d654b3064'))
paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '126ede8ce0e751244b1b54cd359c89d7'))
paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '34e7c1ff0263baf9551000b6bb3bc47e'))
paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545'))
paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'd5b41c7b2df1b064fbd42dcf435268cd'))
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '556fa82daf62cbb0fb393f4125daba77'))
......
......@@ -2327,10 +2327,13 @@ def sequence_conv(input,
def sequence_softmax(input, use_cudnn=False, name=None):
"""
This function computes the softmax activation among all time-steps for each
sequence. The dimension of each time-step should be 1. Thus, the shape of
input Tensor can be either :math:`[N, 1]` or :math:`[N]`, where :math:`N`
is the sum of the length of all sequences.
**Note**:
**The input type of the OP must be LoDTensor. For Tensor, use:** :ref:`api_fluid_layers_softmax`
A LoD-Tensor can be regarded as several sequences, and this op applies softmax along each sequence.
The shape of the input Tensor can be :math:`[N, 1]` or :math:`[N]`, where :math:`N`
is the sum of the lengths of all sequences. Recommended usage: :math:`[N]`.
For the i-th sequence in a mini-batch:
......@@ -2338,29 +2341,56 @@ def sequence_softmax(input, use_cudnn=False, name=None):
Out(X[lod[i]:lod[i+1]], :) = \\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))}
For example, for a mini-batch of 3 sequences with variable-length,
each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7],
then softmax will be computed among :math:`X[0:2, :]`, :math:`X[2:5, :]`,
:math:`X[5:7, :]`, and :math:`N` turns out to be 7.
For example, for a LoD-Tensor with 6 sequences whose lengths are [3, 2, 4, 1, 2, 3],
the LoD at runtime is [[0, 3, 5, 9, 10, 12, 15]],
then softmax will be computed among :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]`,
and :math:`N` turns out to be 15.
.. code-block:: text
*Case 1:
Given:
input.data = [0.7, 1, 0.6,
1.5, 1.1,
1.2, 0.2, 0.6, 1.9,
3.1,
2.5, 0.8,
0.1, 2.4, 1.3]
input.lod = [[0, 3, 5, 9, 10, 12, 15]]
then:
output.data = [0.30724832, 0.41474187, 0.2780098,
0.59868765, 0.40131235,
0.2544242, 0.09359743, 0.13963096, 0.5123474,
1.,
0.84553474, 0.15446526,
0.06995796, 0.69777346, 0.23226859]
output.lod = [[0, 3, 5, 9, 10, 12, 15]]
Args:
input (Variable): The input variable which is a LoDTensor.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. Default: False.
name (str|None): A name for this layer(optional). If set None, the layer
will be named automatically. Default: None.
input (Variable): A LoDTensor with shape :math:`[N, 1]` or :math:`[N]`. Recommended usage: :math:`[N]`.
Supported data types: float32, float64.
use_cudnn (bool, optional): Use the cudnn kernel or not. Effective only when the cudnn version of the paddle
library is installed and a GPU is used for training or inference. Default: False.
name (str, optional): The default value is None. Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: output of sequence_softmax
Variable: A LoD-Tensor which has the same shape and data type as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[7, 1],
x = fluid.data(name='x', shape=[7, 1],
dtype='float32', lod_level=1)
x_sequence_softmax_1 = fluid.layers.sequence_softmax(input=x)
y = fluid.data(name='y', shape=[7],
dtype='float32', lod_level=1)
x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
x_sequence_softmax_2 = fluid.layers.sequence_softmax(input=y)
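# Illustrative numpy sketch (an assumption added here, not Paddle API) of the
# per-sequence softmax for the lod [[0, 3, 5, 9, 10, 12, 15]] from Case 1 above:
import numpy as np
data = np.array([0.7, 1.0, 0.6, 1.5, 1.1, 1.2, 0.2, 0.6, 1.9,
                 3.1, 2.5, 0.8, 0.1, 2.4, 1.3], dtype='float32')
offsets = [0, 3, 5, 9, 10, 12, 15]
ref = np.concatenate([np.exp(data[s:e]) / np.exp(data[s:e]).sum()
                      for s, e in zip(offsets[:-1], offsets[1:])])
# ref[:3] is about [0.3072, 0.4147, 0.2780], matching output.data in Case 1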
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
......@@ -5590,15 +5620,15 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
def sequence_unpad(x, length, name=None):
"""
**Sequence Unpad Layer**
**Note**:
This layer removes the padding data in the input sequences and convert
them into sequences with actual length as output, identitied by lod
information.
**The input of the OP is a Tensor and the output is a LoDTensor. For the padding operation, see:** :ref:`api_fluid_layers_sequence_pad`
The OP removes the padding data from the input based on the length information and returns a LoDTensor.
.. code-block:: text
Example:
Case 1:
Given input Variable **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
......@@ -5613,18 +5643,18 @@ def sequence_unpad(x, length, name=None):
after unpadding, the output Variable will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[2, 3, 4]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): Input Variable which contains the padded sequences with
equal length.
length(Variable): The Variable that specifies the actual ength of
sequences after unpadding.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
x(Variable): A Tensor which contains padding data; its rank must be at least 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample; its size equals the 0th dimension of x.
Supported data types: int64.
name(str|None): The default value is None. Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: The Variable contains the unpadded sequences.
Variable: A LoDTensor whose recursive sequence lengths are consistent with the length parameter and which has the same data type as the input.
Examples:
.. code-block:: python
......@@ -5633,11 +5663,11 @@ def sequence_unpad(x, length, name=None):
import numpy
# pad data
x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32))
pad_data, len = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
# upad data
# unpad data
unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=len)
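# Illustrative numpy sketch (an assumption added here) of the unpad rule from
# Case 1 above; the rows and lengths below are inferred from out.data/out.lod:
import numpy as np
x_np = np.array([[ 1.0,  2.0,  3.0,  4.0,  5.0],
                 [ 6.0,  7.0,  8.0,  9.0, 10.0],
                 [11.0, 12.0, 13.0, 14.0, 15.0]], dtype='float32')
len_np = np.array([2, 3, 4], dtype='int64')
ref = np.concatenate([row[:n] for row, n in zip(x_np, len_np)])
# ref == [1. 2. 6. 7. 8. 11. 12. 13. 14.], matching out.data above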
"""
......@@ -10694,18 +10724,28 @@ def scatter_nd(index, updates, shape, name=None):
def sequence_scatter(input, index, updates, name=None):
"""
**Sequence Scatter Layer**
**Note**:
**The index and updates parameters of the OP must be LoDTensor.**
This operator scatters the Updates tensor to the input X. It uses the LoD
information of Ids to select the rows to update, and use the values in Ids as
the columns to update in each row of X.
Adds the updates data to the corresponding positions of the input according to the index.
Here is an example:
The update rule is as follows: output[instance_index][index[pos]] = input[instance_index][index[pos]] + updates[pos],
where instance_index is the index of the sample in the batch to which position pos belongs.
Given the following input:
The value of output[i][j] depends on whether j can be found in the (i+1)-th interval of the index. If it is found
at position pos, out[i][j] = input[i][j] + updates[pos]; otherwise, out[i][j] = input[i][j].
In the example below, the LoD information of the index is divided into three sequences. Because
element 0 can be found in the first interval of the index, it is updated with the value at
the corresponding position of updates, so out[0][0] = input[0][0] + updates[0][0]. Because element 1 cannot
be found in the third interval of the index, out[2][1] = input[2][1].
.. code-block:: text
*Case 1:
Given:
input.data = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
......@@ -10717,37 +10757,32 @@ def sequence_scatter(input, index, updates, name=None):
updates.data = [[0.3], [0.3], [0.4], [0.1], [0.2], [0.3], [0.4], [0.0], [0.2], [0.3], [0.1], [0.4]]
updates.lod = [[ 0, 3, 8, 12]]
Then we have the output:
.. code-block:: text
Then:
out.data = [[1.3, 1.3, 1.4, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.4, 1.3, 1.2, 1.1],
[1.0, 1.0, 1.3, 1.2, 1.4, 1.1]]
out.dims = X.dims = [3, 6]
Args:
input (Variable): The source input with rank>=1.
index (Variable): A LoD Tensor. The index input of sequence scatter op
where input will be updated. The index input with rank=1. Its dtype
should be int32 or int64 as it is used as indexes.
updates (Variable): A LoD Tensor. The values to scatter to the input
tensor X, must be a LoDTensor with the same LoD information as index.
name (str|None): The output variable name. Default None.
input (Variable): A Tensor with shape :math:`[N, k_1, ..., k_n]`. Supported data types: float32, float64, int32, int64.
index (Variable): A LoDTensor containing index information. Its LoD level must be 1 and its data type must be int64.
updates (Variable): A LoDTensor containing the update values. It has the same LoD level as the index and the
same data type as the input. Supported data types: float32, float64, int32, int64.
name (str, optional): The default value is None. Normally there is no need for the user to set this property. For more information,
please refer to :ref:`api_guide_Name` .
Returns:
Variable: The output is a tensor with the same shape as input.
Variable: A Tensor which has been updated. It has the same shape and data type as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
input = layers.data( name="x", shape=[3, 6], append_batch_size=False, dtype='float32' )
index = layers.data( name='index', shape=[1], dtype='int32')
updates = layers.data( name='updates', shape=[1], dtype='float32')
input = fluid.data( name="x", shape=[None, 3, 6], dtype='float32' )
index = fluid.data( name='index', shape=[12, 1], dtype='int64', lod_level=1)
updates = fluid.data( name='updates', shape=[12, 1], dtype='float32', lod_level=1)
output = fluid.layers.sequence_scatter(input, index, updates)
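# Illustrative numpy sketch of the update rule described above, using an
# assumed (hypothetical) flattened index that is consistent with the
# updates.lod and out.data shown in Case 1:
import numpy as np
inp = np.ones((3, 6), dtype='float32')
idx = np.array([0, 1, 2, 5, 4, 3, 2, 1, 3, 2, 5, 4])          # assumed index.data
upd = np.array([0.3, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.0,
                0.2, 0.3, 0.1, 0.4], dtype='float32')
offsets = [0, 3, 8, 12]                                        # updates.lod offsets
out = inp.copy()
for row, (s, e) in enumerate(zip(offsets[:-1], offsets[1:])):
    out[row, idx[s:e]] += upd[s:e]                             # out[row][idx[pos]] += upd[pos]
# out reproduces out.data shown in Case 1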
"""
......@@ -12605,28 +12640,64 @@ def uniform_random_batch_size_like(input,
max=1.0,
seed=0):
"""
${comment}
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max). The input dimension selected by input_dim_idx provides the value used to resize the output dimension selected by output_dim_idx.
.. code-block:: text
*Case 1:
Given:
    input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
    shape=[2,4]
    input_dim_idx=0    (default)
    output_dim_idx=0   (default)
Since result.shape[output_dim_idx] = input.shape[input_dim_idx],
    result.shape[0] = input.shape[0] = 1,
then:
    result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]]    # result.shape=[1,4]
*Case 2:
Given:
    input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
    shape=[2,4]
    input_dim_idx=1
    output_dim_idx=1
Since result.shape[output_dim_idx] = input.shape[input_dim_idx],
    result.shape[1] = input.shape[1] = 3,
then:
    result=[[-0.23133647, -0.84195036, 0.21441269],
            [-0.08774924, 0.25605237, -0.09403259]]    # result.shape=[2,3]
Args:
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (Int): ${input_dim_idx_comment}
output_dim_idx (Int): ${output_dim_idx_comment}
min (Float): ${min_comment}
max (Float): ${max_comment}
seed (Int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc
input (Variable): A Tensor. Supported data types: float32, float64.
shape (tuple|list): A Python list or tuple that specifies the shape of the output Tensor; its elements are of type int.
input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system. Note that if seed is not 0, this operator will always generate the same random numbers every time. Default 0.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
Returns:
out (Variable): ${out_comment}
Variable: A Tensor of the specified shape filled with random values sampled from a uniform distribution. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
input = layers.data(name="input", shape=[13, 11], dtype='float32')
out = layers.uniform_random_batch_size_like(input, [-1, 11])
# example 1:
input = fluid.data(name="input", shape=[1, 3], dtype='float32')
out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
# example 2:
out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
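# Shape-rule sketch (an illustration with a hypothetical helper name, not the
# operator implementation): the output shape equals `shape`, except that
# shape[output_dim_idx] is replaced by input.shape[input_dim_idx].
def _batch_size_like_shape(input_shape, shape, input_dim_idx=0, output_dim_idx=0):
    out_shape = list(shape)
    out_shape[output_dim_idx] = input_shape[input_dim_idx]
    return out_shape
# _batch_size_like_shape([1, 3], [2, 4])        -> [1, 4]  (example 1)
# _batch_size_like_shape([1, 3], [2, 4], 1, 1)  -> [2, 3]  (example 2)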
"""
helper = LayerHelper('uniform_random_batch_size_like', **locals())
......@@ -17039,8 +17110,8 @@ def mse_loss(input, label):
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
"""
This operator initializes a variable with random values sampled from a
uniform distribution. The random result is in set [min, max).
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max).
Examples:
::
......@@ -17052,24 +17123,23 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
result=[[0.8505902, 0.8397286]]
Args:
shape (list|tuple|Variable): The shape of the output tensor, the data type of the integer is int,
and if the shape type is list or tuple, its elements can be an integer
or a tensor with the shape [1], the data type of the tensor is int64.
If the shape type is Variable,it ia a 1D tensor, the data type of the tensor is int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of the output tensor, such as float32, float64.
shape (list|tuple|Variable): The shape of the output Tensor. If the shape is a list or tuple,
its elements can be integers
or Tensors with the shape [1], and the data type of such a Tensor is int64.
If the shape is a Variable, it is a 1-D Tensor whose data type is int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the output Tensor. Supported data types: float32, float64.
Default: float32.
min (float, optional): Minimum value of uniform random, It's a closed interval. Default -1.0.
max (float, optional): Maximun value of uniform random, It's an open interval. Default 1.0.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a
seed generated by the system. Note that if seed is not 0, this
operator will always generate the same random numbers every time.
Default 0.
Returns: a Tensor with randomly initialized results whose data type is determined by the dtype parameter
and whose dimension is determined by the shape parameter.
Return type: Variable
Returns:
Variable: A Tensor of the specified shape, filled with random values sampled from a uniform distribution.
Throw exception:
Raises:
TypeError: The type of shape should be list, tuple or Variable.
Examples:
......@@ -17088,7 +17158,7 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
# example 3:
# attr shape is a Variable, the data type must be int64
var_shape = fluid.layers.data(name='var_shape',shape=[2],append_batch_size=False)
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.uniform_random(var_shape)
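# example 4 (illustrative sketch): values are drawn from the half-open
# interval [min, max), so with the defaults -1.0 can be returned but 1.0 cannot.
result_4 = fluid.layers.uniform_random([2, 3], dtype='float32', min=-1.0, max=1.0, seed=10)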
"""
......