Commit 3c185cfa authored by Feiyu Chan, committed by Guo Sheng


cherry pick 91693b9c and resolve conflict in API.spec, test=document_fix (#20477)
Parent faeb541c
@@ -228,7 +228,7 @@ paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None
paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '00d437d1e0d9450ea75a0495b93b54a7'))
paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d'))
paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '60b4dbe35f2b47f7290e79907a4eacec'))
paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cb417a61f701c937f33d057fe85203ab'))
paddle.fluid.layers.brelu (ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None)), ('document', '49580538249a52c857fce75c94ad8af7'))
paddle.fluid.layers.leaky_relu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', '11352d3780f62952ea3332658714758c'))
@@ -405,7 +405,7 @@ paddle.fluid.layers.softsign (ArgSpec(args=['x', 'name'], varargs=None, keywords
paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'alpha'], varargs=None, keywords=None, defaults=(None,)), ('document', '958c7bfdfb0b5e92af6ca4a90d24e5ef'))
paddle.fluid.layers.hard_shrink (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '386a4103d2884b2f1312ebc1e8ee6486'))
paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c1f2e4c4511da09d5d89c556ea802bd1'))
paddle.fluid.layers.thresholded_relu (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '9a0464425426a9b9c1b7500ede2836c1'))
paddle.fluid.layers.thresholded_relu (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '94c71025bf11ab8172fd455350274138'))
paddle.fluid.layers.prior_box (ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '0fdf82762fd0a5acb2578a72771b5b44'))
paddle.fluid.layers.density_prior_box (ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', '7a484a0da5e993a7734867a3dfa86571'))
paddle.fluid.layers.multi_box_head (ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False)), ('document', 'fd58078fdfffd899b91f992ba224628f'))
@@ -11486,23 +11486,71 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
@templatedoc()
def swish(x, beta=1.0, name=None):
"""
${comment}
Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
Equation:
.. math::
out = \\frac{x}{1 + e^{- beta * x}}
Args:
x(${x_type}): ${x_comment}
beta(${beta_type}|1.0): ${beta_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
beta(float): Constant beta of swish operator, default 1.0.
name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape as the input x.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
y = fluid.layers.swish(x, beta=2.0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
data = np.random.randn(2, 3).astype("float32")
exe.run(start)
y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
data
# array([[-1.1239197 , 1.3391294 , 0.03921051],
# [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32)
y_np
# array([[-0.2756806 , 1.0610548 , 0.01998957],
# [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
data = np.random.randn(2, 3).astype("float32")
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = dg.to_variable(data)
y = fluid.layers.swish(x)
y_np = y.numpy()
data
# array([[-0.0816701 , 1.1603649 , -0.88325626],
# [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32)
y_np
# array([[-0.03916847, 0.8835007 , -0.25835553],
# [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32)
"""
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
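For reference, the swish equation above is easy to cross-check outside of Paddle. The sketch below is illustrative only (plain NumPy, not part of the patched file) and evaluates out = x / (1 + exp(-beta * x)) on the sample data from the imperative-mode example:

import numpy as np

def swish_ref(x, beta=1.0):
    # out = x / (1 + exp(-beta * x)), i.e. x * sigmoid(beta * x)
    return x / (1.0 + np.exp(-beta * x))

data = np.array([[-0.0816701, 1.1603649, -0.88325626],
                 [0.7522361, 1.0978601, 0.12987892]], dtype="float32")
print(swish_ref(data))
# approximately [[-0.0392,  0.8835, -0.2584],
#                [ 0.5113,  0.8232,  0.0692]]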
@@ -12269,24 +12317,57 @@ def uniform_random_batch_size_like(input,
@templatedoc()
def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
"""
${comment}
Generate a random tensor whose data is drawn from a Gaussian distribution.
Args:
shape (tuple|list): ${shape_comment}
mean (Float): ${mean_comment}
std (Float): ${std_comment}
seed (Int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): Output data type.
shape (Tuple[int] | List[int]): Shape of the generated random tensor.
mean (float): Mean of the random tensor, defaults to 0.0.
std (float): Standard deviation of the random tensor, defaults to 1.0.
seed (int): ${seed_comment}
dtype(np.dtype | core.VarDesc.VarType | str): Output data type, float32 or float64.
Returns:
out (Variable): ${out_comment}
Variable: Random tensor whose data is drawn from a Gaussian distribution, dtype: float32 or float64 as specified.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
out = layers.gaussian_random(shape=[20, 30])
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
exe.run(start)
x_np, = exe.run(main, feed={}, fetch_list=[x])
x_np
# array([[2.3060477, 2.676496 , 3.9911983],
# [0.9990833, 2.8675377, 2.2279181]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
x_np = x.numpy()
x_np
# array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ],
# [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
"""
helper = LayerHelper('gaussian_random', **locals())
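As a point of comparison for the arguments described above, the following NumPy-only sketch draws a tensor with the same shape and statistics (illustrative only; the concrete values differ from Paddle's output because the generators and seeding are unrelated):

import numpy as np

# NumPy analogue of fluid.layers.gaussian_random((2, 3), mean=0.0, std=2.0)
rng = np.random.RandomState(10)
sample = rng.normal(loc=0.0, scale=2.0, size=(2, 3)).astype("float32")
print(sample.shape, sample.dtype)  # (2, 3) float32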
@@ -172,10 +172,70 @@ def thresholded_relu(x, threshold=None):
return _thresholded_relu_(**kwargs)
thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
thresholded_relu.__doc__ = """
:strong:`Thresholded ReLU Activation Operator`
Equation:
.. math::
out = \\begin{cases}
x, &if x > threshold \\\\
0, &otherwise
\\end{cases}
Args:
x(Variable): The input of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64.
threshold(float, optional): The threshold value. Note that if the arg `threshold` is not set, the threshold in the equation is 1.0.
Returns:
Variable: The output of Thresholded ReLU op, Tensor or LoDTensor, with the same dtype (float32 or float64) and shape as the input.
Examples:
>>> import paddle.fluid as fluid
>>> data = fluid.layers.data(name="input", shape=[1])
>>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
y = fluid.layers.thresholded_relu(x, threshold=0.1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
data = np.random.randn(2, 3).astype("float32")
exe.run(start)
y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
data
# array([[ 0.21134382, -1.1805999 , 0.32876605],
# [-1.2210793 , -0.7365624 , 1.0013918 ]], dtype=float32)
y_np
# array([[ 0.21134382, -0. , 0.32876605],
# [-0. , -0. , 1.0013918 ]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
data = np.random.randn(2, 3).astype("float32")
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = dg.to_variable(data)
y = fluid.layers.thresholded_relu(x, threshold=0.1)
y_np = y.numpy()
data
# array([[ 0.21134382, -1.1805999 , 0.32876605],
# [-1.2210793 , -0.7365624 , 1.0013918 ]], dtype=float32)
y_np
# array([[ 0.21134382, -0. , 0.32876605],
# [-0. , -0. , 1.0013918 ]], dtype=float32)
"""