Commit c837a0ac authored by gongweibao

follow comments

Parent be97c47e
@@ -6385,6 +6385,7 @@ def expand(x, expand_times, name=None):
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@templatedoc()
def uniform_random_batch_size_like(input,
shape,
dtype='float32',
@@ -6394,22 +6395,19 @@ def uniform_random_batch_size_like(input,
max=1.0,
seed=0):
"""
UniformRandomBatchSizeLike operator.
This operator initializes a tensor with the same batch_size as the Input tensor, filling it with random values sampled from a uniform distribution.
${comment}
Args:
input (Variable): Tensor whose input_dim_idx'th dimension specifies the batch_size.
shape (tuple|list): the shape of the output.
input_dim_idx (Int): The index of input's batch size dimension.
output_dim_idx (Int): The index of output's batch size dimension.
min (Float): Minimum value of uniform random.
max (Float): Maximum value of uniform random.
seed (Int): Random seed used for generating samples. 0 means use a seed generated by the system.
Note that if seed is not 0, this operator will always generate the same random numbers every time.
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (Int): ${input_dim_idx_comment}
output_dim_idx (Int): ${output_dim_idx}
min (Float): ${min_comment}
max (Float): ${max_comment}
seed (Int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
Returns:
out (Variable): Output of this operator.
out (Variable): ${out_comment}
"""
@@ -6433,6 +6431,7 @@ def uniform_random_batch_size_like(input,
return out
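
A minimal usage sketch of the uniform_random_batch_size_like layer documented above, written against the legacy paddle.fluid API; the tensor name 'x', the shapes, and the feed values are illustrative assumptions:

```python
import numpy as np
import paddle.fluid as fluid

# 'x' has a runtime shape of [batch_size, 8]; the layer copies that batch
# size into dimension 0 of the output and fills it with uniform noise.
x = fluid.layers.data(name='x', shape=[8], dtype='float32')
rand = fluid.layers.uniform_random_batch_size_like(
    input=x, shape=[-1, 5], min=-1.0, max=1.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={'x': np.ones((3, 8), dtype='float32')},
               fetch_list=[rand])
print(out.shape)  # (3, 5): batch size taken from 'x'
```
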
@templatedoc()
def gaussian_random(shape,
mean=0.0,
std=1.0,
@@ -6440,21 +6439,18 @@ def gaussian_random(shape,
dtype='float32',
use_mkldnn=False):
"""
GaussianRandom Operator.
Used to initialize tensors with a Gaussian random generator.
${comment}
Args:
shape (tuple|list): The dimension of random tensor.
mean (Float): Mean of random tensor.
std (Float): Std of random tensor.
seed (Int): Random seed of the generator. 0 means use the system-wide seed.
Note that if seed is not 0, this operator will always generate the same random numbers every time.
shape (tuple|list): ${shape_comment}
mean (Float): ${mean_comment}
std (Float): ${std_comment}
seed (Int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): Output data type.
use_mkldnn (Bool): Only used in mkldnn kernel.
Returns:
out (Variable): Output of this operator.
out (Variable): ${out_comment}
"""
@@ -6476,23 +6472,20 @@ def gaussian_random(shape,
return out
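
A minimal sketch of gaussian_random as documented above, assuming the legacy paddle.fluid execution flow; the shape, mean, and std are arbitrary example values:

```python
import paddle.fluid as fluid

# Draw a fixed-shape [2, 3] tensor from N(0, 1).
out = fluid.layers.gaussian_random(shape=[2, 3], mean=0.0, std=1.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
result, = exe.run(fetch_list=[out])
print(result.shape)  # (2, 3)
```
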
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
"""
SamplingId Operator.
A layer for sampling an id from the multinomial distribution given by the input.
It samples one id for each input sample.
${comment}
Args:
x (Variable): The input tensor of softmax. 2-D with shape [batch_size, input_feature_dimensions].
min (Float): Minimum value of random.
max (Float): Maximum value of random.
seed (Float): Random seed used for the random number engine. 0 means use a seed generated by the system.
Note that if seed is not 0, this operator will always generate the same random numbers every time.
x (Variable): ${x_comment}
min (Float): ${min_comment}
max (Float): ${max_comment}
seed (Float): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
Returns:
out (Variable): Output of this operator.
out (Variable): ${out_comment}
"""
@@ -6509,6 +6502,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
return out
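
A minimal sketch of sampling_id as documented above; the variable name 'probs' and the probability rows are illustrative assumptions:

```python
import numpy as np
import paddle.fluid as fluid

# Each row of 'probs' is a distribution over 4 classes; sampling_id draws
# one column index per row.
probs = fluid.layers.data(name='probs', shape=[4], dtype='float32')
ids = fluid.layers.sampling_id(probs)

feed = np.array([[0.1, 0.2, 0.3, 0.4],
                 [0.7, 0.1, 0.1, 0.1]], dtype='float32')
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={'probs': feed}, fetch_list=[ids])
print(out)  # one sampled id per input row
```
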
@templatedoc()
def gaussian_random_batch_size_like(input,
shape,
input_dim_idx=0,
@@ -6518,20 +6512,20 @@ def gaussian_random_batch_size_like(input,
seed=0,
dtype='float32'):
"""
Used to initialize tensors with a Gaussian random generator. The default mean of the distribution is 0.0 and the default standard deviation (std) is 1.0. Users can set the mean and std via the input arguments.
${comment}
Args:
input (Variable): Tensor whose input_dim_idx'th dimension specifies the batch_size.
shape (tuple|list): the shape of the output.
input_dim_idx (Int): The index of input's batch size dimension
output_dim_idx (Int): The index of output's batch size dimension
mean (Float): The mean (or center) of the gaussian distribution.
std (Float): The standard deviation (std, or spread) of the gaussian distribution.
seed (Int): Random seed of the generator. 0 means use the system-wide seed. Note that if seed is not 0, this operator will always generate the same random numbers every time.
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (Int): ${input_dim_idx}
output_dim_idx (Int): ${output_dim_idx_comment}
mean (Float): ${mean_comment}
std (Float): ${std_comment}
seed (Int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
Returns:
out (Variable): Output of this operator
out (Variable): ${out_comment}
"""
helper = LayerHelper('gaussian_random_batch_size_like', **locals())
@@ -6554,19 +6548,17 @@ def gaussian_random_batch_size_like(input,
return out
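
A minimal sketch of gaussian_random_batch_size_like as documented above; the tensor name 'x', shapes, and feed values are illustrative assumptions:

```python
import numpy as np
import paddle.fluid as fluid

# The output copies the batch size of 'x' (dimension 0) and is filled
# with samples from N(0, 1).
x = fluid.layers.data(name='x', shape=[10], dtype='float32')
noise = fluid.layers.gaussian_random_batch_size_like(
    input=x, shape=[-1, 10], mean=0.0, std=1.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={'x': np.zeros((5, 10), dtype='float32')},
               fetch_list=[noise])
print(out.shape)  # (5, 10)
```
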
@templatedoc()
def sum(x, use_mkldnn=False):
"""
Sum operator.
This operator sums the input tensors. All the inputs can carry
the LoD (Level of Details) information. However, the output only
shares the LoD information with the first input.
${comment}
Args:
x (Variable): The input tensors of sum operator.
use_mkldnn (Bool): Only used in mkldnn kernel
x (Variable): ${x_comment}
use_mkldnn (Bool): ${use_mkldnn_comment}
Returns:
out (Variable): Output of this operator
out (Variable): ${out_comment}
"""
@@ -6581,49 +6573,19 @@ def sum(x, use_mkldnn=False):
return out
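
A minimal sketch of the sum layer documented above; the tensor names 'a' and 'b' and the feed values are illustrative assumptions:

```python
import numpy as np
import paddle.fluid as fluid

# Element-wise sum of two same-shape tensors; the output shares the LoD
# of the first input.
a = fluid.layers.data(name='a', shape=[3], dtype='float32', append_batch_size=False)
b = fluid.layers.data(name='b', shape=[3], dtype='float32', append_batch_size=False)
total = fluid.layers.sum([a, b])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={'a': np.array([1., 2., 3.], dtype='float32'),
                     'b': np.array([10., 20., 30.], dtype='float32')},
               fetch_list=[total])
print(out)  # [11. 22. 33.]
```
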
@templatedoc()
def slice(input, axes, starts, ends):
"""
Slice Operator.
Produces a slice of the input tensor along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the `axes`, `starts` and `ends` attributes to specify the start and
end indices for each axis in the list of axes; it uses this information
to slice the input data tensor. If a negative value is passed for any of
the start or end indices, it represents the number of elements before the end
of that dimension. If the value passed to start or end is larger than
n (the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. If axes are omitted, they are set to [0, ..., ndim-1].
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
starts = [0, 1]
ends = [-1, 1000]
Then:
result = [ [2, 3, 4], ]
${comment}
Args:
input (Variable): Tensor of data to extract slices from.
axes (List): Axes that `starts` and `ends` apply to. It is optional. If not present, it will be treated as [0, 1, ..., len(`starts`) - 1].
starts (List): Starting indices of the corresponding axis in `axes`.
ends (List): Ending indices of the corresponding axis in `axes`.
input (Variable): ${input_comment}.
axes (List): ${axes_comment}
starts (List): ${starts_comment}
ends (List): ${ends_comment}
Returns:
out (Variable): The output of this operator.
out (Variable): ${output_comment}
"""
@@ -6640,16 +6602,16 @@ def slice(input, axes, starts, ends):
return out
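
A minimal sketch reproducing Case1 from the slice docstring above with the fluid API; the variable name 'data' is an illustrative choice:

```python
import numpy as np
import paddle.fluid as fluid

# Case1 from the docstring: rows 1:2 and columns 0:3 of a 2 x 4 tensor.
data = fluid.layers.data(name='data', shape=[2, 4], dtype='float32',
                         append_batch_size=False)
sliced = fluid.layers.slice(data, axes=[0, 1], starts=[1, 0], ends=[2, 3])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={'data': np.array([[1, 2, 3, 4],
                                       [5, 6, 7, 8]], dtype='float32')},
               fetch_list=[sliced])
print(out)  # [[5. 6. 7.]]
```
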
@templatedoc()
def shape(input):
"""
Shape Operator.
Gets the shape of the input tensor. Only CPU input Tensors are supported for now.
${comment}
Args:
input (Variable): The input tensor.
input (Variable): ${input_comment}
Returns:
out (Variable): The output of this operator.
out (Variable): ${out_comment}
"""