#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ...tensor.ops import sigmoid  # noqa: F401
from ...tensor.math import tanh  # noqa: F401
from ...tensor.math import tanh_  # noqa: F401

from ...fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ...tensor.manipulation import chunk
from ...tensor.math import multiply

import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import convert_np_dtype_to_dtype_
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.framework import core
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode

__all__ = []


def celu(x, alpha=1.0, name=None):
    r"""
    celu activation.

    .. math::

        celu(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            out = F.celu(x, alpha=0.2)
            # [[-0.19865242,  6.        ],
            #  [ 1.        , 15.60000038]]
    """
    if alpha == 0:
        raise ZeroDivisionError("alpha cannot be 0 for celu")

    if _in_legacy_dygraph():
        return _legacy_C_ops.celu(x, 'alpha', alpha)
    if in_dygraph_mode():
        return _C_ops.celu(x, alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu')
    helper = LayerHelper("celu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='celu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'alpha': alpha})
    return out


def elu(x, alpha=1.0, name=None):
    r"""
    elu activation.

    .. math::

        elu(x)=
            \left\{
                \begin{array}{lcl}
                x,& &\text{if } \ x > 0 \\
                alpha * (e^{x} - 1),& &\text{if } \ x <= 0
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            out = F.elu(x, alpha=0.2)
            # [[-0.12642411  6.        ]
            #  [ 1.          15.6      ]]
    """

    if in_dygraph_mode():
        return _C_ops.elu(x, alpha)

    if _in_legacy_dygraph():
        return _legacy_C_ops.elu(x, 'alpha', alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
    helper = LayerHelper("elu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='elu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'alpha': alpha})
    return out


@inplace_apis_in_dygraph_only
def elu_(x, alpha=1.0, name=None):
    r"""
    Inplace version of ``elu`` API; the output is computed in place on the input ``x``.
    Please refer to :ref:`api_nn_cn_elu`.
    """
    assert alpha >= 0., "elu_ only support alpha >= 0, please use elu instead."
    if in_dygraph_mode():
        return _C_ops.elu_(x, alpha)
    return _legacy_C_ops.elu_(x, 'alpha', alpha)


def gelu(x, approximate=False, name=None):
    r"""
    gelu activation.

    The activation function of Gelu is calculated element by element. For more information, please refer to `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`_.

    if approximate is True

    .. math::

        gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))

    else

    .. math::

        gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        approximate (bool, optional): Whether to enable approximation. Default is False.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
            out1 = F.gelu(x)
            # [[-0.15865529,  0.34573123],
            #  [ 0.84134471,  1.39978933]]
            out2 = F.gelu(x, True)
            # [[-0.15880799,  0.34571400],
            #  [ 0.84119201,  1.39957154]]
    """

    if in_dygraph_mode():
        return _C_ops.gelu(x, approximate)

    if _in_legacy_dygraph():
        return _legacy_C_ops.gelu(x, 'approximate', approximate)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
    helper = LayerHelper("gelu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='gelu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'approximate': approximate})
    return out


def hardshrink(x, threshold=0.5, name=None):
    r"""
    hard shrinkage activation

    .. math::

        hardshrink(x)=
            \left\{
                \begin{array}{rcl}
                x,&  &if \ {x > threshold}  \\
                x,&  &if \ {x < -threshold}   \\
                0,&  &if \ {others} &
                \end{array}
            \right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-1, 0.3, 2.5])
            out = F.hardshrink(x) # [-1., 0., 2.5]

    """
    if in_dygraph_mode():
        return _C_ops.hard_shrink(x, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_shrink(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardshrink')
    helper = LayerHelper('hardshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='hard_shrink',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'threshold': threshold})
    return out


def hardtanh(x, min=-1.0, max=1.0, name=None):
    r"""
    hardtanh activation

    .. math::

        hardtanh(x)=
            \left\{
                \begin{array}{cll}
                    max,& & \text{if } x > max \\
                    min,& & \text{if } x < min \\
                    x,& & \text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        min (float, optional): The minimum value of the linear region range. Default is -1.
        max (float, optional): The maximum value of the linear region range. Default is 1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np

            x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
            out = F.hardtanh(x) # [-1., 0.3, 1.]
    """

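    # Note: hardtanh is computed with the bounded relu ("brelu") kernel, which clips
    # values to the [min, max] interval.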
    if in_dygraph_mode():
        return _C_ops.brelu(x, min, max)

    if _in_legacy_dygraph():
        return _legacy_C_ops.brelu(x, 't_min', min, 't_max', max)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardtanh')

    helper = LayerHelper('hardtanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='brelu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         't_min': min,
                         't_max': max
                     })
    return out


def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
    r"""
    hardsigmoid activation.

    A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
    which is much faster than sigmoid.

    .. math::

        hardsigmoid(x)=
            \left\{
                \begin{array}{lcl}
                0, & &\text{if } \ x \leq -3 \\
                1, & &\text{if } \ x \geq 3 \\
                slope * x + offset, & &\text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        slope (float, optional): The slope of hardsigmoid function. Default is 0.1666667.
        offset (float, optional): The offset of hardsigmoid function. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardsigmoid(x) # [0., 1., 0.666667]
    """

    if in_dygraph_mode():
        return _C_ops.hard_sigmoid(x, slope, offset)

    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardsigmoid')

    helper = LayerHelper('hardsigmoid', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='hard_sigmoid',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         'slope': slope,
                         'offset': offset
                     })
    return out


def hardswish(x, name=None):
    r"""
    hardswish activation

    hardswish is proposed in MobileNetV3, and performs better than swish in computational
    stability and efficiency. For more details please refer
    to: https://arxiv.org/pdf/1905.02244.pdf

    .. math::

        hardswish(x)=
            \left\{
                \begin{array}{cll}
                0 &, & \text{if } x \leq -3 \\
                x &, & \text{if } x \geq 3 \\
                \frac{x(x+3)}{6} &, & \text{otherwise}
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardswish(x) # [0., 5., 0.666667]
    """

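    # The literals (6, 6, 3) forwarded to the new dygraph op below correspond to
    # hardswish's threshold, scale and offset attributes (editorial reading; the values
    # are simply passed through unchanged here).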
    if _in_legacy_dygraph():
        return _legacy_C_ops.hard_swish(x)
    if in_dygraph_mode():
        return _C_ops.hard_swish(x, 6, 6, 3)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardswish')

    helper = LayerHelper('hardswish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
    return out


def leaky_relu(x, negative_slope=0.01, name=None):
    r"""
    leaky_relu activation

    .. math::
        leaky\_relu(x)=
        \left\{
            \begin{array}{rcl}
                x, & & if \ x >= 0 \\
                negative\_slope * x, & & otherwise \\
            \end{array}
        \right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        negative_slope (float, optional): Slope of the activation function at
            :math:`x < 0` . Default is 0.01.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-2., 0., 1.])
            out = F.leaky_relu(x)
            print(out)
            # [-0.02, 0., 1.]

    """
    if in_dygraph_mode():
        return _C_ops.leaky_relu(x, negative_slope)

    if _in_legacy_dygraph():
        return _legacy_C_ops.leaky_relu(x, 'alpha', negative_slope)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'leaky_relu')
    helper = LayerHelper('leaky_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='leaky_relu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'alpha': negative_slope})
    return out


def prelu(x, weight, data_format="NCHW", name=None):
    """
    prelu activation.

    .. math::

        prelu(x) = max(0, x) + weight * min(0, x)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        weight (Tensor): The learnable parameter with the same data type as ``x``.
            The weight shape is [1] or [in], where `in` is the number of input channels of ``x``.
        data_format(str, optional): Data format that specifies the layout of input.
            It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
                               [ 3.0, -4.0,  5.0, -6.0],
                               [-7.0, -8.0,  8.0,  9.0]],
                              [[ 1.0, -2.0, -3.0,  4.0],
                               [-5.0,  6.0,  7.0, -8.0],
                               [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')

            w = paddle.to_tensor([0.25], dtype='float32')
            out = F.prelu(data, w)
            print(out)
            # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
            #    [ 3.  , -1.  ,  5.  , -1.5 ],
            #    [-1.75, -2.  ,  8.  ,  9.  ]],
            #   [[ 1.  , -0.5 , -0.75,  4.  ],
            #    [-1.25,  6.  ,  7.  , -2.  ],
            #    [ 6.  ,  7.  ,  8.  ,  9.  ]]]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
    check_variable_and_dtype(weight, 'weight',
                             ['float16', 'float32', 'float64'], 'prelu')

    assert len(weight.shape
               ) == 1, "The dim count of weight shape should be 1 in prelu()."

    mode = 'all'
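    # A single-element weight shares one slope across the whole tensor ('all' mode);
    # a per-channel weight switches the op to 'channel' mode below.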
    if weight.shape[0] > 1:

        true_data_format = [
            'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC'
        ]
        if data_format not in true_data_format:
            raise ValueError(
                "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
                "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))

        data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
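        # Only the channel position matters from here on, so channel-first layouts are
        # collapsed to 'NCHW' and channel-last layouts to 'NHWC'.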

        assert len(
            x.shape
        ) > 1, "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]."

        #NOTE(GuoxiaWang): support NHWC data format
        if data_format == 'NHWC':
            assert weight.shape[0] == x.shape[
                -1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
        else:
            assert weight.shape[0] == x.shape[
                1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
        mode = 'channel'

    if in_dygraph_mode():
        return _C_ops.prelu(x, weight, data_format, mode)
    if _in_legacy_dygraph():
        return _legacy_C_ops.prelu(x, weight, 'mode', mode, 'data_format',
                                   data_format)

    helper = LayerHelper('prelu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type="prelu",
                     inputs={
                         "X": x,
                         "Alpha": weight
                     },
                     outputs={"Out": out},
                     attrs={
                         "mode": mode,
                         "data_format": data_format
                     })
    return out


def rrelu(x, lower=1. / 8., upper=1. / 3., training=True, name=None):
    r"""
    rrelu activation.

    Applies the randomized leaky rectified linear unit function to improve generalization performance,
    as described in the paper:
    `Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_

    During training, randomly samples the negative slope for activation values as described below:

    .. math::

        rrelu(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x >= 0 \\
                    a * x, & & otherwise \\
                \end{array}
            \right.

    where :math:`x` is the input tensor,
    :math:`a` is randomly sampled from uniform distribution in range (:math:`lower`, :math:`upper`),

    In the test phase, the negative slope will take the average value of :math:`lower` and :math:`upper`:

    .. math::

        rrelu(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x >= 0 \\
                    (lower + upper) * 0.5 * x, & & otherwise \\
                \end{array}
            \right.

    where :math:`x` is the input tensor,
    :math:`lower` and :math:`upper` are the bounds of uniform distribution.

    Parameters:
        x (Tensor): The input Tensor with data type float16, float32, float64.
        lower (float, optional): The lower bound of uniform distribution. Default: 0.125.
        upper (float, optional): The upper bound of uniform distribution. Default: 0.333.
        training (bool, optional): Whether it is in training mode. Default is True.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input_tensor = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
                                            [ 3.0, -4.0,  5.0, -6.0],
                                            [-7.0, -8.0,  8.0,  9.0]],
                                            [[ 1.0, -2.0, -3.0,  4.0],
                                            [-5.0,  6.0,  7.0, -8.0],
                                            [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')

            out = F.rrelu(input_tensor, 0.1, 0.3)
            print(out)
            #[[[[-0.20000899  3.         -0.8810822   5.        ]
            #   [ 3.         -0.55175185  5.         -1.0776101 ]
            #   [-1.0680687  -1.9896201   8.          9.        ]]
            #  [[ 1.         -0.5238267  -0.65515125  4.        ]
            #   [-1.3766339   6.          7.         -2.3465784 ]
            #   [ 6.          7.          8.          9.        ]]]]
    """

    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'X', ['float16', 'float32', 'float64'],
                                 'rrelu')

    if not isinstance(lower, float) or not isinstance(upper, float):
        raise TypeError(
            "The lower and upper values must be float type. Received: lower {}, upper {}."
            .format(lower, upper))

    if lower < 0 or lower > 1:
        raise ValueError(
            "The lower value must be no less than zero and no greater than one. Received: {}."
            .format(lower))

    if upper < lower:
        raise ValueError(
            "The upper value must be greater than lower value. Received: lower {}, upper {}."
            .format(lower, upper))

    if upper > 1:
        raise ValueError(
            "The upper value must be no greater than one. Received: {}.".format(
                upper))

    is_test = not training
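    # In training mode the negative slope is sampled from U(lower, upper); in test mode
    # the kernel uses the fixed mean (lower + upper) / 2, as described in the docstring.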

    if _in_legacy_dygraph():
        out, noise = _legacy_C_ops.rrelu(x, 'lower', lower, 'upper', upper,
                                         'is_test', is_test)
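        # The second output ("noise") stores the sampled slopes used by the backward pass;
        # only the activation result is returned to the caller.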
        return out

    helper = LayerHelper('rrelu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    noise = helper.create_variable_for_type_inference(dtype=x.dtype)
    attrs = {'lower': lower, 'upper': upper, 'is_test': is_test}
    helper.append_op(type='rrelu',
                     inputs={"X": x},
                     outputs={
                         "Out": out,
                         "Noise": noise
                     },
                     attrs=attrs)
    return out


def relu(x, name=None):
    """
    relu activation.

    .. math::

        out = max(x, 0)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-2, 0, 1], dtype='float32')
            out = F.relu(x)
            print(out)
            # [0., 0., 1.]
    """

    if in_dygraph_mode():
        return _C_ops.relu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu(x)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
    helper = LayerHelper('relu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
    return out


@inplace_apis_in_dygraph_only
def relu_(x, name=None):
    """
    Inplace version of ``relu`` API; the output is computed in place on the input ``x``.
    Please refer to :ref:`api_nn_cn_relu`.
    """
    if in_dygraph_mode():
        return _C_ops.relu_(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu_(x)


def log_sigmoid(x, name=None):
    r"""
    log_sigmoid activation.

    .. math::

        log\_sigmoid(x) = log \frac{1}{1 + e^{-x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
    """

    if in_dygraph_mode():
        return _C_ops.logsigmoid(x)

    if _in_legacy_dygraph():
        return _legacy_C_ops.logsigmoid(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'log_sigmoid')
    helper = LayerHelper("log_sigmoid", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
    return out


def maxout(x, groups, axis=1, name=None):
    r"""
    maxout activation.

    Assume the input shape is (N, Ci, H, W) and the output shape is (N, Co, H, W);
    then :math:`Co = Ci / groups` and the operator formula is as follows:

    .. math::

        \begin{array}{l}
        &out_{si+j} = \max_{k} x_{gsi + sk + j} \\
        &g = groups \\
        &s = \frac{input.size}{num\_channels} \\
        &0 \le i < \frac{num\_channels}{groups} \\
        &0 \le j < s \\
        &0 \le k < groups
        \end{array}


    Parameters:
        x (Tensor): The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C], the data type
            of input is float32 or float64.
        groups (int): The group number of maxout. `groups` must be a divisor of the
            number of channels of ``x`` along ``axis``; each output channel is the
            maximum over its group of input channels.
        axis (int, optional): The axis along which to perform maxout calculations.
            It should be 1 when data format is NCHW, be -1 or 3 when data format
            is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
            where D is the dimensions of ``x`` . ``axis`` only supports 1, 3 or -1.
            Default is 1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.rand([1, 2, 3, 4])
            # [[[[0.5002636  0.22272532 0.17402348 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.02879342 0.88725346 0.61093384 0.38833922]]
            #   [[0.5231306  0.03807496 0.91661984 0.15602879]
            #    [0.666127   0.616567   0.30741522 0.24044901]
            #    [0.7142536  0.7351477  0.31588817 0.23782359]]]]
            out = F.maxout(x, groups=2)
            # [[[[0.5231306  0.22272532 0.91661984 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
    """
    if _in_legacy_dygraph():
        return _legacy_C_ops.maxout(x, 'groups', groups, 'axis', axis)
    if in_dygraph_mode():
        return _C_ops.maxout(x, groups, axis)
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
    if axis not in [1, -1, 3]:
        raise ValueError(
            "Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
            "Attr(axis): %s." % str(axis))
    if axis == -1:
        axis = 3
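    # axis == -1 is normalized to 3 because a 4-D NHWC input keeps its channels in the
    # last dimension.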

    helper = LayerHelper('maxout', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='maxout',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         'groups': groups,
                         'axis': axis
                     })
    return out


def relu6(x, name=None):
    """
    relu6 activation

    .. math::

        relu6(x) = min(max(0,x), 6)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([-1, 0.3, 6.5])
            out = F.relu6(x)
            print(out)
            # [0, 0.3, 6]
    """
    threshold = 6.0
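    # relu6 is relu with a fixed upper bound: the threshold attribute is hard-wired to 6.0.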
    if in_dygraph_mode():
        return _C_ops.relu6(x, threshold)
    if in_dynamic_mode():
        return _legacy_C_ops.relu6(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
    helper = LayerHelper('relu6', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='relu6',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'threshold': threshold})
    return out


def selu(x,
         scale=1.0507009873554804934193349852946,
         alpha=1.6732632423543772848170429916717,
         name=None):
    r"""
    selu activation

    .. math::

        selu(x)= scale *
            \left\{
                \begin{array}{lcl}
                x,& &\text{if } \ x > 0 \\
                alpha * e^{x} - alpha,& &\text{if } \ x <= 0
                \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale (float, optional): The value of scale (must be greater than 1.0) for selu. Default is 1.0507009873554804934193349852946.
        alpha (float, optional): The value of alpha (must be no less than zero) for selu. Default is 1.6732632423543772848170429916717.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([[0.0, 1.0],[2.0, 3.0]])
            out = F.selu(x)
            print(out)
            # [[0, 1.050701],[2.101402, 3.152103]]
    """
    if scale <= 1.0:
        raise ValueError(
            "The scale must be greater than 1.0. Received: {}.".format(scale))

    if alpha < 0:
        raise ValueError(
            "The alpha must be no less than zero. Received: {}.".format(alpha))

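    # Note: the default scale and alpha are the self-normalizing constants from
    # "Self-Normalizing Neural Networks" (Klambauer et al., 2017).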
    if in_dygraph_mode():
        return _C_ops.selu(x, scale, alpha)
    if _in_legacy_dygraph():
        return _legacy_C_ops.selu(x, 'scale', scale, 'alpha', alpha)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
    helper = LayerHelper('selu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='selu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         'scale': scale,
                         'alpha': alpha
                     })
    return out


def silu(x, name=None):
    r"""
    silu activation

    .. math::

        silu(x) = \frac{x}{1 + e^{-x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
    """

    if in_dygraph_mode():
        return _C_ops.silu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.silu(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
    helper = LayerHelper("silu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='silu', inputs={'X': x}, outputs={'Out': out})
    return out


def softmax(x, axis=-1, dtype=None, name=None):
    r"""
    This operator implements the softmax layer. The calculation process is as follows:

    1. The dimension :attr:`axis` of ``x`` will be permuted to the last.

    2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
    dimension (row length) is the same as the dimension :attr:`axis` of ``x``,
    and the first dimension (column length) is the product of all other dimensions
    of ``x``. For each row of the matrix, the softmax operator squashes the
    K-dimensional (K is the width of the matrix, which is also the size of ``x``'s
    dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
    vector of real values in the range [0, 1] that add up to 1.

    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
    are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .

    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.

    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(\exp(x[i, j]))}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]
          Attrs:
            axis = 1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to perform softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        dtype (str, optional): The data type of the output tensor, can be float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same shape and data type (use ``dtype`` if it is
        specified) as x.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np

            x = np.array([[[2.0, 3.0, 4.0, 5.0],
                        [3.0, 4.0, 5.0, 6.0],
                        [7.0, 8.0, 8.0, 9.0]],
                        [[1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [6.0, 7.0, 8.0, 9.0]]], 'float32')
            x = paddle.to_tensor(x)
            out1 = F.softmax(x)
            out2 = F.softmax(x, dtype='float64')
            # out1's data type is float32; out2's data type is float64
            # out1 and out2's value is as follows:
            # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
            # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
    """

    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    use_cudnn = True

    if in_dygraph_mode():
        outs_cast = x if dtype is None \
            else _C_ops.cast(x, dtype)
        return _C_ops.softmax(outs_cast, axis)

    if _in_legacy_dygraph():
        outs_cast = x if dtype is None \
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return _legacy_C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn',
                                     use_cudnn)

    if dtype is None:
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'softmax')
    else:
        check_dtype(
            dtype, 'dtype', ['float32', 'float64'], 'softmax',
            'If dtype is not None, it only support float32 or float64.')

    helper = LayerHelper("softmax", **locals())
    outs_cast = x
    if dtype is not None:
        outs_cast = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type='cast',
                         inputs={'X': x},
                         outputs={'Out': outs_cast},
                         attrs={
                             'in_dtype': x.dtype,
                             'out_dtype': dtype
                         })

    outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype)
    helper.append_op(type='softmax',
                     inputs={'X': outs_cast},
                     outputs={'Out': outs_softmax},
                     attrs={
                         'axis': axis,
                         'use_cudnn': use_cudnn
                     })

    return outs_softmax


@inplace_apis_in_dygraph_only
def softmax_(x, axis=-1, dtype=None, name=None):
    r"""
    Inplace version of ``softmax`` API; the output is computed in place on the input ``x``.
    Please refer to :ref:`api_nn_cn_softmax`.
    """
    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    use_cudnn = True

    if in_dygraph_mode():
        outs_cast = x if dtype is None \
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return _C_ops.softmax_(outs_cast, axis)

    if _in_legacy_dygraph():
        outs_cast = x if dtype is None \
            else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return _legacy_C_ops.softmax_(outs_cast, 'axis', axis, 'use_cudnn',
                                      use_cudnn)


def softplus(x, beta=1, threshold=20, name=None):
    r"""
    softplus activation

    .. math::

        softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
        \text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        beta (float, optional): The value of beta for softplus. Default is 1
        threshold (float, optional): The value of threshold for softplus. Default is 20
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
    """

    if in_dygraph_mode():
        return _C_ops.softplus(x, beta, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.softplus(x, 'beta', beta, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softplus')
    helper = LayerHelper('softplus', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='softplus',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         'beta': beta,
                         'threshold': threshold
                     })
    return out


def softshrink(x, threshold=0.5, name=None):
    r"""
    softshrink activation

    .. math::

        softshrink(x)=
            \left\{
                \begin{array}{rcl}
                x - threshold,& & \text{if } x > threshold \\
                x + threshold,& & \text{if } x < -threshold \\
                0,& &  \text{otherwise}
            \end{array}
            \right.

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
    """
    if threshold < 0:
        raise ValueError(
            "The threshold must be no less than zero. Received: {}.".format(
                threshold))

    if in_dygraph_mode():
        return _C_ops.soft_shrink(x, threshold)
    if _in_legacy_dygraph():
        return _legacy_C_ops.softshrink(x, 'lambda', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softshrink')
    helper = LayerHelper('softshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='softshrink',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'lambda': threshold})
    return out


def softsign(x, name=None):
    r"""
    softsign activation

    .. math::

        softsign(x) = \frac{x}{1 + |x|}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
    """
    if in_dygraph_mode():
        return _C_ops.softsign(x)
    if in_dynamic_mode():
        return _legacy_C_ops.softsign(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softsign')
    helper = LayerHelper('softsign', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out})
    return out


def swish(x, name=None):
    r"""
    swish activation.

    .. math::

        swish(x) = \frac{x}{1 + e^{-x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np

            x = paddle.to_tensor(np.array([-2., 0., 1.]))
            out = F.swish(x) # [-0.238406, 0., 0.731059]
    """
    if in_dygraph_mode():
        return _C_ops.swish(x, 1.0)
    if _in_legacy_dygraph():
        return _legacy_C_ops.swish(x, 'beta', 1.0)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
    helper = LayerHelper('swish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='swish',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'beta': 1.0})
    return out


def mish(x, name=None):
    r"""
    mish activation.

    ..  math::

        softplus(x) = \begin{cases}
                x, \text{if } x > \text{threshold} \\
                \ln(1 + e^{x}),  \text{otherwise}
            \end{cases}

        mish(x) = x * \tanh(softplus(x))
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([-5., 0., 5.])
            out = F.mish(x) # [-0.03357624, 0., 4.99955208]
    """
    if in_dygraph_mode():
        return _C_ops.mish(x, 20)
    if _in_legacy_dygraph():
        return _legacy_C_ops.mish(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mish')
    helper = LayerHelper('mish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='mish', inputs={'X': x}, outputs={'Out': out})
    return out


def tanhshrink(x, name=None):
    """
    tanhshrink activation

    .. math::

        tanhshrink(x) = x - tanh(x)

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
    """
    if in_dygraph_mode():
        return _C_ops.tanh_shrink(x)

    if _in_legacy_dygraph():
        return _legacy_C_ops.tanh_shrink(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'tanhshrink')
    helper = LayerHelper('tanh_shrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
    return out


def thresholded_relu(x, threshold=1.0, name=None):
    r"""
    thresholded relu activation.

    .. math::

        thresholded\_relu(x) =
            \left\{
                \begin{array}{rl}
                x,& \text{if } \ x > threshold \\
                0,& \text{otherwise}
                \end{array}
            \right.


    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for thresholded_relu. Default is 1.0
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np

            x = paddle.to_tensor(np.array([2., 0., 1.]))
            out = F.thresholded_relu(x) # [2., 0., 0.]
    """

    if in_dygraph_mode():
        return _C_ops.thresholded_relu(x, threshold)

    if _in_legacy_dygraph():
        return _legacy_C_ops.thresholded_relu(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'thresholded_relu')
    helper = LayerHelper('thresholded_relu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='thresholded_relu',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={'threshold': threshold})
    return out


def log_softmax(x, axis=-1, dtype=None, name=None):
    r"""
    This operator implements the log_softmax layer. The calculation process is
    as follows:

    .. math::

        \begin{aligned}
        log\_softmax[i, j] &= log(softmax(x)) \\
        &= log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j]))})
        \end{aligned}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to perform log_softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
            type of the output tensor. If dtype is specified, ``x`` is casted
            to ``dtype`` before the operation is performed. This is useful for
            preventing data type overflows. Supported dtype: float32, float64.
            If ``dtype`` is None, the output Tensor has the same dtype as x.
            Default is None.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same shape and data type (use ``dtype`` if it is
        specified) as x.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = [[[-2.0, 3.0, -4.0, 5.0],
                  [3.0, -4.0, 5.0, -6.0],
                  [-7.0, -8.0, 8.0, 9.0]],
                 [[1.0, -2.0, -3.0, 4.0],
                  [-5.0, 6.0, 7.0, -8.0],
                  [6.0, 7.0, 8.0, 9.0]]]
            x = paddle.to_tensor(x)
            out1 = F.log_softmax(x)
            out2 = F.log_softmax(x, dtype='float64')
            # out1's data type is float32; out2's data type is float64
            # out1 and out2's value is as follows:
            # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
            #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
            #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
            #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
            #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
            #   [ -3.4401896   -2.4401896   -1.4401896   -0.44018966]]]
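
            # Hedged equivalence check with the definition above:
            # log_softmax(x) matches log(softmax(x)) up to floating-point error.
            same = paddle.allclose(out1, paddle.log(F.softmax(x)))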
    """

    if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        if dtype is not None:
            x = _C_ops.cast(x, dtype)
        return _C_ops.log_softmax(x, axis)

    if _in_legacy_dygraph():
        if dtype is not None:
            x = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return _legacy_C_ops.log_softmax(x, 'axis', axis)

    if dtype is None:
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'log_softmax')
    else:
        check_dtype(
            dtype, 'dtype', ['float32', 'float64'], 'log_softmax',
            'If dtype is not None, it only supports float32 or float64.')

    helper = LayerHelper("log_softmax", **locals())
    out_cast = x
    if dtype is not None:
        out_cast = helper.create_variable_for_type_inference(dtype)
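        # Insert a cast op so the log_softmax below runs in the requested dtype.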
        helper.append_op(type='cast',
                         inputs={'X': x},
                         outputs={'Out': out_cast},
                         attrs={
                             'in_dtype': x.dtype,
                             'out_dtype': dtype
                         })

    out = helper.create_variable_for_type_inference(out_cast.dtype)
    helper.append_op(type='log_softmax',
                     inputs={'X': out_cast},
                     outputs={'Out': out},
                     attrs={'axis': axis})

    return out


def glu(x, axis=-1, name=None):
    r"""
    The gated linear unit. The input is evenly split into 2 parts along a
    given axis. The first part is used as the content, and the second part is
    passed through a sigmoid function then used as the gate. The output is an
    elementwise multiplication of the content and the gate.

    .. math::

        \mathrm{GLU}(a, b) = a \otimes \sigma(b)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to split the input tensor. It
            should be in range [-D, D), where D is the dimensions of ``x`` .
            If ``axis`` < 0, it works the same way as :math:`axis + D` .
            Default is -1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A Tensor with the same data type as x. The size of the given axis is
        halved.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import functional as F

            x = paddle.to_tensor(
                [[-0.22014759, -1.76358426,  0.80566144,  0.04241343],
                 [-1.94900405, -1.89956081,  0.17134808, -1.11280477]]
            )
            print(F.glu(x).numpy())
            # array([[-0.15216254, -0.9004892 ],
            #        [-1.0577879 , -0.46985325]], dtype=float32)
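
            # Splitting along axis=0 instead halves the first dimension
            # (illustrative shape check only):
            print(F.glu(x, axis=0).shape)  # [1, 4]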

    """
    check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                             "glu")
    a, b = chunk(x, 2, axis=axis, name=name)
    gate = sigmoid(b, name=name)
    out = paddle.multiply(a, gate, name=name)
    return out


def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
    r"""
    Samples from the Gumbel-Softmax distribution and optionally discretizes.
    temperature is denoted by t. The calculation process is as follows:

    First, generate gumbel noise:

    .. math::

        G_i = -log(-log(U_i)), U_i \sim U(0,1)

    Second, add noise to ``x``:

    .. math::

        v = [x_1 + G_1,...,x_n + G_n]

    Finally, calculate gumbel_softmax and generate samples:

    .. math::
        gumbel\_softmax(v_i)=\frac{e^{v_i/t}}{\sum_{j=1}^n{e^{v_j/t}}},i=1,2,3...n

    Parameters:
        x (Tensor): An N-D Tensor, the first N - 1 dimensions index into a batch
            of independent distributions and the last dimension represents
            a vector of probabilities with datatype float32, float64.
        temperature (float, optional): non-negative scalar temperature.
            Default is 1.0.
        hard (bool, optional): if True, the returned samples will be discretized as
            one-hot vectors, but will be differentiated as if it is the soft sample
            in autograd. Default is False.
        axis (int, optional): The axis along which the softmax is calculated.
            Default is -1.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Sampled tensor of same shape as ``x`` from the Gumbel-Softmax distribution.
        If ``hard = True``, the returned samples will be one-hot, otherwise they will be
        probability distributions that sum to 1 across ``axis``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            logits = paddle.randn([4, 6])
            temperature = 0.01
            gumbel_softmax = F.gumbel_softmax(logits, temperature)
            print(gumbel_softmax)
            # out's value is as follows:
            # [[0.00000001, 1.        , 0.00000000, 0.00000000, 0.00000006, 0.00000000],
            # [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 1.        ],
            # [0.00000062, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.99999940],
            # [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]
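
            # With hard=True each row is discretized to a one-hot vector in the
            # forward pass while gradients still flow through the soft sample
            # (output is random; shown for illustration only).
            out_hard = F.gumbel_softmax(logits, temperature, hard=True)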

    """
    if in_dygraph_mode():
        return _C_ops.gumbel_softmax(x, temperature, hard, axis)

    if in_dynamic_mode():
        return _legacy_C_ops.gumbel_softmax(x, 'temperature', temperature,
                                            'hard', hard, 'axis', axis)

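    # Static graph: register a gumbel_softmax op carrying the temperature,
    # hard and axis attributes.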
    helper = LayerHelper("gumbel_softmax", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'gumbel_softmax')
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='gumbel_softmax',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs={
                         'temperature': temperature,
                         'hard': hard,
                         'axis': axis
                     })
    return out