#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ...tensor.ops import sigmoid  # noqa: F401
from ...tensor.math import tanh  # noqa: F401
from ...tensor.math import tanh_  # noqa: F401

from ...fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ...tensor.manipulation import chunk

from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import convert_np_dtype_to_dtype_
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.framework import core
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode

__all__ = []


def celu(x, alpha=1.0, name=None):
    r"""
    celu activation.

    .. math::

        celu(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            out = F.celu(x, alpha=0.2)
            # [[-0.19865242,  6.        ],
            #  [ 1.        , 15.60000038]]
    """
    if alpha == 0:
        raise ZeroDivisionError("alpha cannot be 0 for celu")

    if _in_legacy_dygraph():
        return _legacy_C_ops.celu(x, 'alpha', alpha)
    if in_dygraph_mode():
        return _C_ops.celu(x, alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu')
    helper = LayerHelper("celu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='celu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha},
    )
    return out
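

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the
# hypothetical sketch below recomputes the CELU formula from the docstring with basic
# tensor ops, e.g. for eager-mode verification of `F.celu`.
def _celu_reference_sketch(x, alpha=1.0):
    # celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
    zero = paddle.zeros_like(x)
    return paddle.maximum(zero, x) + paddle.minimum(
        zero, alpha * (paddle.exp(x / alpha) - 1.0)
    )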


def elu(x, alpha=1.0, name=None):
    r"""
    elu activation.

    .. math::

        elu(x)=
            \left\{
                \begin{array}{lcl}
                x,& &\text{if } \ x > 0 \\
                alpha * (e^{x} - 1),& &\text{if } \ x <= 0
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            out = F.elu(x, alpha=0.2)
            # [[-0.12642411  6.        ]
            #  [ 1.          15.6      ]]
    """

    if in_dygraph_mode():
        return _C_ops.elu(x, alpha)

    if _in_legacy_dygraph():
        return _legacy_C_ops.elu(x, 'alpha', alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
    helper = LayerHelper("elu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha},
    )
    return out


@inplace_apis_in_dygraph_only
def elu_(x, alpha=1.0, name=None):
    r"""
    Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_nn_cn_elu`.
    """
    assert alpha >= 0.0, "elu_ only support alpha >= 0, please use elu instead."
    if in_dygraph_mode():
        return _C_ops.elu_(x, alpha)
    return _legacy_C_ops.elu_(x, 'alpha', alpha)


def gelu(x, approximate=False, name=None):
    r"""
    gelu activation.

    The activation function of GELU is calculated element by element. For more information,
    please refer to `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`_.

    if approximate is True

    .. math::

        gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))

    else

    .. math::

        gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        approximate (bool, optional): Whether to enable approximation. Default is False.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
            out1 = F.gelu(x)
            # [[-0.15865529,  0.34573123],
            #  [ 0.84134471,  1.39978933]]
            out2 = F.gelu(x, True)
            # [[-0.15880799,  0.34571400],
            #  [ 0.84119201,  1.39957154]]
    """

    if in_dygraph_mode():
        return _C_ops.gelu(x, approximate)

    if _in_legacy_dygraph():
        return _legacy_C_ops.gelu(x, 'approximate', approximate)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
    helper = LayerHelper("gelu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='gelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'approximate': approximate},
    )
    return out
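

# Illustrative cross-check (assumed helper, not part of Paddle's public API): both GELU
# branches from the docstring, written with basic tensor ops. 0.7978845608028654 is
# sqrt(2 / pi), used by the tanh approximation.
def _gelu_reference_sketch(x, approximate=False):
    if approximate:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * x * (
            1.0 + paddle.tanh(0.7978845608028654 * (x + 0.044715 * x * x * x))
        )
    # 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + paddle.erf(x / (2.0**0.5)))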


def hardshrink(x, threshold=0.5, name=None):
    r"""
    hard shrinkage activation

    .. math::

        hardshrink(x)=
            \left\{
                \begin{array}{rcl}
                x,&  &if \ {x > threshold}  \\
                x,&  &if \ {x < -threshold}   \\
                0,&  &if \ {others} &
                \end{array}
            \right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-1, 0.3, 2.5])
            out = F.hardshrink(x) # [-1., 0., 2.5]

    """
    if in_dygraph_mode():
        return _C_ops.hardshrink(x, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_shrink(x, 'threshold', threshold)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'hardshrink'
    )
    helper = LayerHelper('hardshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='hard_shrink',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold},
    )
    return out


def hardtanh(x, min=-1.0, max=1.0, name=None):
    r"""
    hardtanh activation. Calculate the `hardtanh` of input `x`.

    .. math::

        hardtanh(x)=
            \left\{
                \begin{array}{cll}
                    max,& & \text{if } x > max \\
                    min,& & \text{if } x < min \\
                    x,& & \text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        min (float, optional): The minimum value of the linear region range. Default is -1.
        max (float, optional): The maximum value of the linear region range. Default is 1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-1.5, 0.3, 2.5])
            out = F.hardtanh(x) # [-1., 0.3, 1.]
    """

    if in_dygraph_mode():
        return _C_ops.hardtanh(x, min, max)

    if _in_legacy_dygraph():
        return _legacy_C_ops.brelu(x, 't_min', min, 't_max', max)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'hardtanh'
    )

    helper = LayerHelper('hardtanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'t_min': min, 't_max': max},
    )
    return out


def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
    r"""
    hardsigmoid activation. Calculate the `hardsigmoid` of input `x`.
    A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
    which is much faster than sigmoid.

    .. math::

        hardsigmoid(x)=
            \left\{
                \begin{array}{lcl}
                0, & &\text{if } \ x \leq -3 \\
                1, & &\text{if } \ x \geq 3 \\
                slope * x + offset, & &\text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        slope (float, optional): The slope of hardsigmoid function. Default is 0.1666667.
        offset (float, optional): The offset of hardsigmoid function. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardsigmoid(x) # [0., 1., 0.666667]
    """

    if in_dygraph_mode():
        return _C_ops.hardsigmoid(x, slope, offset)

    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid'
    )

    helper = LayerHelper('hardsigmoid', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': slope, 'offset': offset},
    )
    return out
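

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the
# piecewise-linear formula above is just a clipped affine function of x.
def _hardsigmoid_reference_sketch(x, slope=0.1666667, offset=0.5):
    # 0 for x <= -3, 1 for x >= 3, slope * x + offset otherwise
    # (with the default slope/offset, both forms agree at the break points).
    return paddle.clip(slope * x + offset, min=0.0, max=1.0)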


def hardswish(x, name=None):
    r"""
    hardswish activation. hardswish was proposed in MobileNetV3, and offers
    better computational stability and efficiency than the swish function.
    For more details, please refer to: https://arxiv.org/pdf/1905.02244.pdf

    .. math::

        hardswish(x)=
            \left\{
                \begin{array}{cll}
                0 &, & \text{if } x \leq -3 \\
                x &, & \text{if } x \geq 3 \\
                \frac{x(x+3)}{6} &, & \text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardswish(x) # [0., 5., 0.666667]
    """

    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_swish(x)
    if in_dygraph_mode():
        return _C_ops.hardswish(x, 6, 6, 3)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'hardswish'
    )

    helper = LayerHelper('hardswish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
    return out
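

# Illustrative cross-check (assumed helper, not part of Paddle's public API): hardswish
# is x times a hard-sigmoid-like factor clip(x + 3, 0, 6) / 6, which reproduces the
# piecewise definition in the docstring.
def _hardswish_reference_sketch(x):
    return x * paddle.clip(x + 3.0, min=0.0, max=6.0) / 6.0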


def leaky_relu(x, negative_slope=0.01, name=None):
    r"""
    leaky_relu activation. The calculation formula is:

    .. math::
        leaky\_relu(x)=
        \left\{
            \begin{array}{rcl}
                x, & & if \ x >= 0 \\
                negative\_slope * x, & & otherwise \\
            \end{array}
        \right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        negative_slope (float, optional): Slope of the activation function at
            :math:`x < 0` . Default is 0.01.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-2., 0., 1.])
            out = F.leaky_relu(x)
            print(out)
            # [-0.02, 0., 1.]

    """
    if in_dygraph_mode():
        return _C_ops.leaky_relu(x, negative_slope)

    if _in_legacy_dygraph():
        return _legacy_C_ops.leaky_relu(x, 'alpha', negative_slope)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu'
    )
    helper = LayerHelper('leaky_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='leaky_relu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': negative_slope},
    )
    return out


def prelu(x, weight, data_format="NCHW", name=None):
    """
    prelu activation.

    .. math::

        prelu(x) = max(0, x) + weight * min(0, x)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        weight (Tensor): The learnable parameter with data type same as ``x``.
            The weight shape is [1] or [in], where `in` is the input channel of ``x``.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        data_format (str, optional): Data format that specifies the layout of input.
            It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
                               [ 3.0, -4.0,  5.0, -6.0],
                               [-7.0, -8.0,  8.0,  9.0]],
                              [[ 1.0, -2.0, -3.0,  4.0],
                               [-5.0,  6.0,  7.0, -8.0],
                               [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')

            w = paddle.to_tensor([0.25], dtype='float32')
            out = F.prelu(data, w)
            print(out)
            # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
            #    [ 3.  , -1.  ,  5.  , -1.5 ],
            #    [-1.75, -2.  ,  8.  ,  9.  ]],
            #   [[ 1.  , -0.5 , -0.75,  4.  ],
            #    [-1.25,  6.  ,  7.  , -2.  ],
            #    [ 6.  ,  7.  ,  8.  ,  9.  ]]]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
    check_variable_and_dtype(
        weight, 'weight', ['float16', 'float32', 'float64'], 'prelu'
    )
    assert (
        len(weight.shape) == 1
    ), "The dim count of weight shape should be 1 in prelu()."

    mode = 'all'
    if weight.shape[0] > 1:
        true_data_format = [
            'NC',
            'NCL',
            'NCHW',
            'NCDHW',
            'NLC',
            'NHWC',
            'NDHWC',
        ]
        if data_format not in true_data_format:
            raise ValueError(
                "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
                "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)
            )

        data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'

        assert (
            len(x.shape) > 1
        ), "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]."

        # NOTE(GuoxiaWang): support NHWC data format
        if data_format == 'NHWC':
            assert (
                weight.shape[0] == x.shape[-1]
            ), "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
        else:
            assert (
                weight.shape[0] == x.shape[1]
            ), "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
        mode = 'channel'

    if in_dygraph_mode():
        return _C_ops.prelu(x, weight, data_format, mode)
    if _in_legacy_dygraph():
        return _legacy_C_ops.prelu(
            x, weight, 'mode', mode, 'data_format', data_format
        )

    helper = LayerHelper('prelu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type="prelu",
        inputs={"X": x, "Alpha": weight},
        outputs={"Out": out},
        attrs={"mode": mode, "data_format": data_format},
    )
    return out
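

# Illustrative cross-check (assumed helper, not part of Paddle's public API): a sketch of
# the 'channel' mode used above for channel-first input, where the per-channel weight is
# broadcast against axis 1.
def _prelu_nchw_reference_sketch(x, weight):
    # prelu(x) = max(0, x) + weight * min(0, x), with weight of shape [C]
    w = paddle.reshape(weight, [1, -1] + [1] * (len(x.shape) - 2))
    zero = paddle.zeros_like(x)
    return paddle.maximum(zero, x) + w * paddle.minimum(zero, x)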


def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
    r"""
    rrelu activation.

    Applies the randomized leaky rectified liner unit function to improve generalization performance,
    as described in the paper:
    `Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_

    During training, randomly samples the negative slope for activation values as described below:

    .. math::

        rrelu(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x >= 0 \\
                    a * x, & & otherwise \\
                \end{array}
            \right.

    where :math:`x` is the input tensor,
    :math:`a` is randomly sampled from uniform distribution in range (:math:`lower`, :math:`upper`),

    In the test phase, the negative slope will take the average value of :math:`lower` and :math:`upper`:

    .. math::

        rrelu(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x >= 0 \\
                    (lower + upper) * 0.5 * x, & & otherwise \\
                \end{array}
            \right.

    where :math:`x` is the input tensor,
    :math:`lower` and :math:`upper` are the bounds of uniform distribution.

    Parameters:
        x (Tensor): The input Tensor with data type float16, float32, float64.
        lower (float, optional): The lower bound of uniform distribution. Default: 0.125.
        upper (float, optional): The upper bound of uniform distribution. Default: 0.333.
        training (bool, optional): Whether it is in training mode or not. Default is True.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input_tensor = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
                                            [ 3.0, -4.0,  5.0, -6.0],
                                            [-7.0, -8.0,  8.0,  9.0]],
                                            [[ 1.0, -2.0, -3.0,  4.0],
                                            [-5.0,  6.0,  7.0, -8.0],
                                            [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')

            out = F.rrelu(input_tensor, 0.1, 0.3)
            print(out)
            #[[[[-0.20000899  3.         -0.8810822   5.        ]
            #   [ 3.         -0.55175185  5.         -1.0776101 ]
            #   [-1.0680687  -1.9896201   8.          9.        ]]
            #  [[ 1.         -0.5238267  -0.65515125  4.        ]
            #   [-1.3766339   6.          7.         -2.3465784 ]
            #   [ 6.          7.          8.          9.        ]]]]
    """

    if not in_dynamic_mode():
        check_variable_and_dtype(
            x, 'X', ['float16', 'float32', 'float64'], 'rrelu'
        )

    if not isinstance(lower, float) or not isinstance(upper, float):
        raise TypeError(
            "The lower and upper values must be float type. Received: lower {}, upper {}.".format(
                lower, upper
            )
        )

    if lower < 0 or lower > 1:
        raise ValueError(
            "The lower value must be no less than zero and no greater than one. Received: {}.".format(
                lower
            )
        )

    if upper < lower:
        raise ValueError(
            "The upper value must be greater than lower value. Received: lower {}, upper {}.".format(
                lower, upper
            )
        )

    if upper > 1:
        raise ValueError(
            "The upper value must be no greater than one. Received: {}.".format(
                upper
            )
        )

    is_test = not training

    if _in_legacy_dygraph():
        out, noise = _legacy_C_ops.rrelu(
            x, 'lower', lower, 'upper', upper, 'is_test', is_test
        )
        return out

    helper = LayerHelper('rrelu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    noise = helper.create_variable_for_type_inference(dtype=x.dtype)
    attrs = {'lower': lower, 'upper': upper, 'is_test': is_test}
    helper.append_op(
        type='rrelu',
        inputs={"X": x},
        outputs={"Out": out, "Noise": noise},
        attrs=attrs,
    )
    return out
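

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the
# inference-time behaviour described above, where the random negative slope is replaced
# by the mean of the uniform range, (lower + upper) / 2.
def _rrelu_eval_reference_sketch(x, lower=1.0 / 8.0, upper=1.0 / 3.0):
    slope = (lower + upper) / 2.0
    return paddle.where(x >= 0, x, slope * x)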


def relu(x, name=None):
    """
    relu activation.

    .. math::

        out = max(x, 0)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-2, 0, 1], dtype='float32')
            out = F.relu(x)
            print(out)
            # [0., 0., 1.]
    """

    if in_dygraph_mode():
        return _C_ops.relu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu(x)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
    helper = LayerHelper('relu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
    return out


@inplace_apis_in_dygraph_only
def relu_(x, name=None):
    """
    Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_nn_cn_relu`.
    """
    if in_dygraph_mode():
        return _C_ops.relu_(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu_(x)


def log_sigmoid(x, name=None):
    r"""
    log_sigmoid activation.

    .. math::

        log\_sigmoid(x) = log \frac{1}{1 + e^{-x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
    """

    if in_dygraph_mode():
        return _C_ops.logsigmoid(x)

    if _in_legacy_dygraph():
        return _legacy_C_ops.logsigmoid(x)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'log_sigmoid'
    )
    helper = LayerHelper("log_sigmoid", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
    return out


def maxout(x, groups, axis=1, name=None):
    r"""
    maxout activation.

    Assuming the input shape is (N, Ci, H, W), the output shape is (N, Co, H, W),
    where Co = Ci / groups, and the operator formula is as follows:

    .. math::

        \begin{array}{l}
        &out_{si+j} = \max_{k} x_{gsi + sk + j} \\
        &g = groups \\
        &s = \frac{input.size}{num\_channels} \\
        &0 \le i < \frac{num\_channels}{groups} \\
        &0 \le j < s \\
        &0 \le k < groups
        \end{array}


    Parameters:
        x (Tensor): The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C], the data type
            of input is float32 or float64.
        groups (int): The group number of maxout. The channels along ``axis`` are divided
            into consecutive groups of size ``groups`` and the output keeps the maximum of
            each group, so it must be a factor of the number of channels of ``x``.
        axis (int, optional): The axis along which to perform maxout calculations.
            It should be 1 when data format is NCHW, be -1 or 3 when data format
            is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
            where D is the dimensions of ``x`` . ``axis`` only supports 1, 3 or -1.
            Default is 1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.rand([1, 2, 3, 4])
            # [[[[0.5002636  0.22272532 0.17402348 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.02879342 0.88725346 0.61093384 0.38833922]]
            #   [[0.5231306  0.03807496 0.91661984 0.15602879]
            #    [0.666127   0.616567   0.30741522 0.24044901]
            #    [0.7142536  0.7351477  0.31588817 0.23782359]]]]
            out = F.maxout(x, groups=2)
            # [[[[0.5231306  0.22272532 0.91661984 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
    """
    if _in_legacy_dygraph():
        return _legacy_C_ops.maxout(x, 'groups', groups, 'axis', axis)
    if in_dygraph_mode():
        return _C_ops.maxout(x, groups, axis)
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
    if axis not in [1, -1, 3]:
        raise ValueError(
            "Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
            "Attr(axis): %s." % str(axis)
        )
    if axis == -1:
        axis = 3

    helper = LayerHelper('maxout', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
859 860 861 862 863 864
    helper.append_op(
        type='maxout',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'groups': groups, 'axis': axis},
    )
    return out
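

# Illustrative cross-check (assumed helper, not part of Paddle's public API): for NCHW
# input and axis=1, the formula above splits the channel dimension into consecutive
# groups of size `groups` and keeps the element-wise maximum of each group.
def _maxout_nchw_reference_sketch(x, groups):
    n, c, h, w = x.shape
    reshaped = paddle.reshape(x, [n, c // groups, groups, h, w])
    return paddle.max(reshaped, axis=2)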


def relu6(x, name=None):
    """
    relu6 activation

    .. math::

        relu6(x) = min(max(0,x), 6)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-1, 0.3, 6.5])
            out = F.relu6(x)
            print(out)
            # [0, 0.3, 6]
    """
    threshold = 6.0
    if in_dygraph_mode():
        return _C_ops.relu6(x, threshold)
    if in_dynamic_mode():
        return _legacy_C_ops.relu6(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
    helper = LayerHelper('relu6', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='relu6',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold},
    )
    return out


def selu(
    x,
    scale=1.0507009873554804934193349852946,
    alpha=1.6732632423543772848170429916717,
    name=None,
):
    r"""
    selu activation

    .. math::

        selu(x)= scale *
            \left\{
                \begin{array}{lcl}
                x,& &\text{if } \ x > 0 \\
                alpha * e^{x} - alpha,& &\text{if } \ x <= 0
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale (float, optional): The value of scale (must be greater than 1.0) for selu. Default is 1.0507009873554804934193349852946.
        alpha (float, optional): The value of alpha (must be no less than zero) for selu. Default is 1.6732632423543772848170429916717.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[0.0, 1.0],[2.0, 3.0]])
            out = F.selu(x)
            print(out)
            # [[0, 1.050701],[2.101402, 3.152103]]
    """
    if scale <= 1.0:
        raise ValueError(
            "The scale must be greater than 1.0. Received: {}.".format(scale)
        )

    if alpha < 0:
        raise ValueError(
            "The alpha must be no less than zero. Received: {}.".format(alpha)
        )

    if in_dygraph_mode():
        return _C_ops.selu(x, scale, alpha)
    if _in_legacy_dygraph():
        return _legacy_C_ops.selu(x, 'scale', scale, 'alpha', alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
    helper = LayerHelper('selu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='selu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'scale': scale, 'alpha': alpha},
    )
    return out
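

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the SELU
# formula above with its default self-normalizing constants.
def _selu_reference_sketch(
    x,
    scale=1.0507009873554804934193349852946,
    alpha=1.6732632423543772848170429916717,
):
    return scale * paddle.where(x > 0, x, alpha * paddle.exp(x) - alpha)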


def silu(x, name=None):
    r"""
    silu activation

    .. math::

        silu(x) = \frac{x}{1 + e^{-x}}

    Where :math:`x` is the input Tensor.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as :attr:`x`.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
    """

    if in_dygraph_mode():
        return _C_ops.silu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.silu(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
    helper = LayerHelper("silu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='silu', inputs={'X': x}, outputs={'Out': out})
    return out


def softmax(x, axis=-1, dtype=None, name=None):
    r"""
    This operator implements the softmax layer. The calculation process is as follows:

    1. The dimension :attr:`axis` of ``x`` will be permuted to the last.

    2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
    dimension (row length) is the same as the dimension :attr:`axis` of ``x``,
    and the first dimension (column length) is the product of all other dimensions
    of ``x``. For each row of the matrix, the softmax operator squashes the
    K-dimensional(K is the width of the matrix, which is also the size of ``x``'s
    dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
    vector of real values in the range [0, 1] that add up to 1.

    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
    are performed to restore the two-dimensional matrix to the same shape as ``x`` .

    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.

    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(\exp(x[i, j]))}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]
          Attrs:
            axis = 1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to perform softmax
            calculations. It should be in range [-D, D), where D is the
            rank of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        dtype (str, optional): The data type of the output tensor, can be float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same shape and data type (use ``dtype`` if it is
        specified) as x.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
                        [3.0, 4.0, 5.0, 6.0],
                        [7.0, 8.0, 8.0, 9.0]],
                        [[1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [6.0, 7.0, 8.0, 9.0]]],dtype='float32')
            out1 = F.softmax(x)
            out2 = F.softmax(x, dtype='float64')
            # out1's data type is float32; out2's data type is float64
            # out1 and out2's value is as follows:
            # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
            # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
    """

    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    use_cudnn = True

    if in_dygraph_mode():
        outs_cast = x if dtype is None else _C_ops.cast(x, dtype)
        return _C_ops.softmax(outs_cast, axis)

    if _in_legacy_dygraph():
        outs_cast = (
            x
            if dtype is None
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        )
        return _legacy_C_ops.softmax(
            outs_cast, 'axis', axis, 'use_cudnn', use_cudnn
        )

    if dtype is None:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'softmax'
        )
    else:
        check_dtype(
            dtype,
            'dtype',
            ['float32', 'float64'],
            'softmax',
            'If dtype is not None, it only support float32 or float64.',
        )

    helper = LayerHelper("softmax", **locals())
    outs_cast = x
    if dtype is not None:
        outs_cast = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='cast',
            inputs={'X': x},
            outputs={'Out': outs_cast},
            attrs={'in_dtype': x.dtype, 'out_dtype': dtype},
        )

    outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype)
    helper.append_op(
        type='softmax',
        inputs={'X': outs_cast},
        outputs={'Out': outs_softmax},
        attrs={'axis': axis, 'use_cudnn': use_cudnn},
    )

    return outs_softmax
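

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the usual
# numerically stabilised softmax along `axis`, matching the formula in the docstring
# (subtracting the per-row maximum does not change the result).
def _softmax_reference_sketch(x, axis=-1):
    shifted = x - paddle.max(x, axis=axis, keepdim=True)
    exps = paddle.exp(shifted)
    return exps / paddle.sum(exps, axis=axis, keepdim=True)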


@inplace_apis_in_dygraph_only
def softmax_(x, axis=-1, dtype=None, name=None):
    r"""
    Inplace version of ``softmax`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_nn_cn_softmax`.
    """
    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    use_cudnn = True

    if in_dygraph_mode():
        outs_cast = (
            x
            if dtype is None
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        )
        return _C_ops.softmax_(outs_cast, axis)

    if _in_legacy_dygraph():
        outs_cast = (
            x
            if dtype is None
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        )
        return _legacy_C_ops.softmax_(
            outs_cast, 'axis', axis, 'use_cudnn', use_cudnn
        )


def softplus(x, beta=1, threshold=20, name=None):
    r"""
    softplus activation

    .. math::
        softplus(x)=\begin{cases}
                \frac{1}{\beta} * \log(1 + e^{\beta * x}),&x\leqslant\frac{\varepsilon}{\beta};\\
                x,&x>\frac{\varepsilon}{\beta}.
            \end{cases}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        beta (float, optional): The value of :math:`\beta` for softplus. Default is 1
        threshold (float, optional): The value of :math:`\varepsilon` for softplus. Default is 20
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3], dtype='float32')
            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
    """

    if in_dygraph_mode():
        return _C_ops.softplus(x, beta, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.softplus(x, 'beta', beta, 'threshold', threshold)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'softplus'
    )
    helper = LayerHelper('softplus', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='softplus',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'beta': beta, 'threshold': threshold},
    )
    return out
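

# Illustrative cross-check (assumed helper, not part of Paddle's public API): the
# softplus formula above, with the linear branch used once beta * x exceeds `threshold`
# to avoid overflowing exp().
def _softplus_reference_sketch(x, beta=1.0, threshold=20.0):
    scaled = beta * x
    capped = paddle.minimum(scaled, paddle.full_like(scaled, threshold))
    soft = (1.0 / beta) * paddle.log(1.0 + paddle.exp(capped))
    return paddle.where(scaled > threshold, x, soft)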


def softshrink(x, threshold=0.5, name=None):
    r"""
    softshrink activation

    .. math::

        softshrink(x)=
            \left\{
                \begin{array}{rcl}
                x - threshold,& & \text{if } x > threshold \\
                x + threshold,& & \text{if } x < -threshold \\
                0,& &  \text{otherwise}
            \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
            out = F.softshrink(x)
            print(out)
            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [-0.39999998,  0.        ,  0.        ,  0.30000001])
    """
    if threshold < 0:
        raise ValueError(
            "The threshold must be no less than zero. Received: {}.".format(
                threshold
            )
        )
    if in_dygraph_mode():
        return _C_ops.softshrink(x, threshold)
    if _in_legacy_dygraph():
        return _legacy_C_ops.softshrink(x, 'lambda', threshold)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'softshrink'
    )
    helper = LayerHelper('softshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='softshrink',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'lambda': threshold},
    )
    return out


def softsign(x, name=None):
    r"""
    softsign activation

    .. math::

        softsign(x) = \frac{x}{1 + |x|}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = F.softsign(x)
            print(out)
            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [-0.28571430, -0.16666666,  0.09090909,  0.23076925])
    """
    if in_dygraph_mode():
        return _C_ops.softsign(x)
    if in_dynamic_mode():
        return _legacy_C_ops.softsign(x)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'softsign'
    )
    helper = LayerHelper('softsign', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out})
    return out


def swish(x, name=None):
    r"""
    swish activation.

    .. math::

        swish(x) = \frac{x}{1 + e^{-x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-2., 0., 1.])
            out = F.swish(x)
            print(out)
            # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [-0.23840584,  0.        ,  0.73105854])
    """
    if in_dygraph_mode():
        return _C_ops.swish(x, 1.0)
    if _in_legacy_dygraph():
        return _legacy_C_ops.swish(x, 'beta', 1.0)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
    helper = LayerHelper('swish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='swish', inputs={'X': x}, outputs={'Out': out}, attrs={'beta': 1.0}
    )
    return out


def mish(x, name=None):
    r"""
    mish activation.

    ..  math::

        softplus(x) = \begin{cases}
                x, \text{if } x > \text{threshold} \\
                \ln(1 + e^{x}),  \text{otherwise}
            \end{cases}

        mish(x) = x * \tanh(softplus(x))

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-5., 0., 5.])
            out = F.mish(x) # [-0.03357624, 0., 4.99955208]
    """
    if in_dygraph_mode():
        return _C_ops.mish(x, 20)
    if _in_legacy_dygraph():
        return _legacy_C_ops.mish(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mish')
    helper = LayerHelper('mish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='mish', inputs={'X': x}, outputs={'Out': out})
    return out
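

# Illustrative cross-check (assumed helper, not part of Paddle's public API): mish is
# x * tanh(softplus(x)); the exp() here is capped the same way as in the docstring's
# softplus definition to keep the sketch numerically safe.
def _mish_reference_sketch(x, threshold=20.0):
    capped = paddle.minimum(x, paddle.full_like(x, threshold))
    softplus = paddle.where(x > threshold, x, paddle.log(1.0 + paddle.exp(capped)))
    return x * paddle.tanh(softplus)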


def tanhshrink(x, name=None):
    """
    tanhshrink activation

    .. math::

        tanhshrink(x) = x - tanh(x)

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = F.tanhshrink(x)
            print(out)
            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [-0.02005106, -0.00262468,  0.00033200,  0.00868741])
    """
    if in_dygraph_mode():
        return _C_ops.tanh_shrink(x)

    if _in_legacy_dygraph():
        return _legacy_C_ops.tanh_shrink(x)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink'
    )
    helper = LayerHelper('tanh_shrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
    return out


def thresholded_relu(x, threshold=1.0, name=None):
    r"""
    thresholded relu activation.

    .. math::

        thresholded\_relu(x) =
            \left\{
                \begin{array}{rl}
                x,& \text{if } \ x > threshold \\
                0,& \text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for thresholded_relu. Default is 1.0
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([2., 0., 1.])
            out = F.thresholded_relu(x)
            print(out)
            # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [2., 0., 0.])
    """

    if in_dygraph_mode():
        return _C_ops.thresholded_relu(x, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.thresholded_relu(x, 'threshold', threshold)
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu'
    )
    helper = LayerHelper('thresholded_relu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='thresholded_relu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold},
    )
    return out
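

# A minimal reference sketch (not part of the public API): thresholded_relu
# expressed with ``paddle.where``. Users should call the fused op above; this
# hypothetical helper only restates the formula (x where x > threshold, else 0).
def _thresholded_relu_reference(x, threshold=1.0):
    # keep values strictly greater than the threshold, zero out the rest
    return paddle.where(x > threshold, x, paddle.zeros_like(x))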


def log_softmax(x, axis=-1, dtype=None, name=None):
    r"""
    This operator implements the log_softmax layer. The calculation process is
    as follows:

    .. math::

        \begin{aligned}
        log\_softmax[i, j] &= log(softmax(x)) \\
        &= log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j]))})
        \end{aligned}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to perform log_softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
            type of the output tensor. If dtype is specified, ``x`` is cast
            to ``dtype`` before the operation is performed. This is useful for
            preventing data type overflows. Supported dtype: float32, float64.
            If ``dtype`` is None, the output Tensor has the same dtype as x.
            Default is None.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same shape and data type (use ``dtype`` if it is
        specified) as x.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = [[[-2.0, 3.0, -4.0, 5.0],
                  [3.0, -4.0, 5.0, -6.0],
                  [-7.0, -8.0, 8.0, 9.0]],
                 [[1.0, -2.0, -3.0, 4.0],
                  [-5.0, 6.0, 7.0, -8.0],
                  [6.0, 7.0, 8.0, 9.0]]]
            x = paddle.to_tensor(x)
            out1 = F.log_softmax(x)
            out2 = F.log_softmax(x, dtype='float64')
            # out1's data type is float32; out2's data type is float64
            # out1 and out2's value is as follows:
            # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
            #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
            #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
            #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
            #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
            #   [ -3.4401896   -2.4401896   -1.4401896   -0.44018966]]]
    """
    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        if dtype is not None:
            x = _C_ops.cast(x, dtype)
        return _C_ops.log_softmax(x, axis)

    if _in_legacy_dygraph():
        if dtype is not None:
            x = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return _legacy_C_ops.log_softmax(x, 'axis', axis)

    if dtype is None:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'log_softmax'
        )
    else:
        check_dtype(
            dtype,
            'dtype',
            ['float32', 'float64'],
            'log_softmax',
            'If dtype is not None, it only supports float32 or float64.',
        )

    helper = LayerHelper("log_softmax", **locals())
    out_cast = x
    if dtype is not None:
        out_cast = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='cast',
            inputs={'X': x},
            outputs={'Out': out_cast},
            attrs={'in_dtype': x.dtype, 'out_dtype': dtype},
        )

    out = helper.create_variable_for_type_inference(out_cast.dtype)
    helper.append_op(
        type='log_softmax',
        inputs={'X': out_cast},
        outputs={'Out': out},
        attrs={'axis': axis},
    )

    return out
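

# A minimal reference sketch (not part of the public API): log_softmax written
# with ``paddle.logsumexp``, i.e. log_softmax(x) = x - logsumexp(x, axis). Users
# should call ``log_softmax`` above; this hypothetical helper skips the optional
# ``dtype`` cast.
def _log_softmax_reference(x, axis=-1):
    # subtracting the log-sum-exp along ``axis`` normalizes in log space
    return x - paddle.logsumexp(x, axis=axis, keepdim=True)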


def glu(x, axis=-1, name=None):
    r"""
    The gated linear unit. The input is evenly split into two parts along a
    given axis. The first part is used as the content, and the second part is
    passed through a sigmoid function and then used as the gate. The output is
    an elementwise multiplication of the content and the gate.

    .. math::

        \mathrm{GLU}(a, b) = a \otimes \sigma(b)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to split the input tensor. It
            should be in range [-D, D), where D is the dimensions of ``x`` .
            If ``axis`` < 0, it works the same way as :math:`axis + D` .
            Default is -1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type as x. The size of the given axis is
        halved.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import functional as F

            x = paddle.to_tensor(
                [[-0.22014759, -1.76358426,  0.80566144,  0.04241343],
                 [-1.94900405, -1.89956081,  0.17134808, -1.11280477]]
            )
            print(F.glu(x).numpy())
            # array([[-0.15216254, -0.9004892 ],
            #        [-1.0577879 , -0.46985325]], dtype=float32)
    """
    check_variable_and_dtype(
        x, 'input', ['float16', 'float32', 'float64'], "glu"
    )
    a, b = chunk(x, 2, axis=axis, name=name)
    gate = sigmoid(b, name=name)
    out = paddle.multiply(a, gate, name=name)
    return out


def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
    r"""
    Samples from the Gumbel-Softmax distribution and optionally discretizes.
    The temperature is denoted by t. The calculation process is as follows:

    First, generate Gumbel noise:

    .. math::

        G_i = -log(-log(U_i)), U_i \sim U(0,1)

    Second, add noise to ``x``:

    .. math::

        v = [x_1 + G_1,...,x_n + G_n]

    Finally, calculate gumbel_softmax and generate samples:

    .. math::
        gumbel\_softmax(v_i)=\frac{e^{v_i/t}}{\sum_{j=1}^n{e^{v_j/t}}},i=1,2,3...n

    Parameters:
        x (Tensor): An N-D Tensor, the first N - 1 dimensions index into a batch
            of independent distributions and the last dimension represents
            a vector of probabilities with datatype float32, float64.
        temperature (float, optional): non-negative scalar temperature.
            Default is 1.0.
        hard (bool, optional): if True, the returned samples will be discretized as
            one-hot vectors, but will be differentiated as if they were the soft
            samples in autograd. Default is False.
        axis (int, optional): The axis along which softmax is calculated.
            Default is -1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Sampled tensor of same shape as ``x`` from the Gumbel-Softmax distribution.
        If ``hard = True``, the returned samples will be one-hot, otherwise they will be
        probability distributions that sum to 1 across ``axis``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            logits = paddle.randn([4, 6])
            temperature = 0.01
            gumbel_softmax = F.gumbel_softmax(logits, temperature)
            print(gumbel_softmax)
            # out's value is as follows:
            # [[0.00000001, 1.        , 0.00000000, 0.00000000, 0.00000006, 0.00000000],
            # [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 1.        ],
            # [0.00000062, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.99999940],
            # [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]

    """
    if in_dygraph_mode():
        return _C_ops.gumbel_softmax(x, temperature, hard, axis)

    if in_dynamic_mode():
        return _legacy_C_ops.gumbel_softmax(
            x, 'temperature', temperature, 'hard', hard, 'axis', axis
        )

    helper = LayerHelper("gumbel_softmax", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'gumbel_softmax')
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='gumbel_softmax',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'temperature': temperature, 'hard': hard, 'axis': axis},
    )
    return out
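

# A minimal reference sketch (not part of the public API) of the soft
# (``hard=False``) sampling path described in the docstring above: perturb the
# logits with Gumbel(0, 1) noise and apply a temperature-scaled softmax. This
# hypothetical helper omits the straight-through ``hard=True`` variant that the
# fused op supports.
def _gumbel_softmax_reference(x, temperature=1.0, axis=-1):
    # U_i ~ Uniform(0, 1); the small epsilon guards against log(0)
    u = paddle.uniform(x.shape, dtype=x.dtype, min=0.0, max=1.0)
    eps = 1e-10
    # Gumbel(0, 1) noise: G_i = -log(-log(U_i))
    g = -paddle.log(-paddle.log(u + eps) + eps)
    # softmax of the noise-perturbed, temperature-scaled logits
    return paddle.nn.functional.softmax((x + g) / temperature, axis=axis)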