#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define activation functions of neural network

__all__ = [
    'ELU',
    'GELU',
    'Hardshrink',
    'Hardswish',
    'Tanh',
    'Hardtanh',
    'PReLU',
    'ReLU',
    'ReLU6',
    'SELU',
    'LeakyReLU',
    'Sigmoid',
    'Hardsigmoid',
    'Softmax',
    'Softplus',
    'Softshrink',
    'Softsign',
    'Swish',
    'Tanhshrink',
    'ThresholdedReLU',
    'LogSigmoid',
    'LogSoftmax',
    'Maxout',
]

from ...fluid.dygraph import layers
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.param_attr import ParamAttr
from ...fluid.initializer import Constant
from paddle.framework import get_default_dtype
from .. import functional as F


class ELU(layers.Layer):
    r"""
    ELU Activation.

    .. math::

        ELU(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))

    Parameters:
        alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            m = paddle.nn.ELU(0.2)
            out = m(x)
            # [[-0.12642411  6.        ]
            #  [ 1.          15.6      ]]
    """

    def __init__(self, alpha=1.0, name=None):
        super(ELU, self).__init__()
        self._alpha = alpha
        self._name = name

    def forward(self, x):
        return F.elu(x, self._alpha, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'alpha={}{}'.format(self._alpha, name_str)


class GELU(layers.Layer):
    r"""
    GELU Activation.

    If approximate is True

    .. math::

        GELU(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))

    else

    .. math::

        GELU(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))

    Parameters:
        approximate (bool, optional): Whether to enable approximation. Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([[-1, 0.5], [1, 1.5]]))

            m = paddle.nn.GELU()
            out = m(x) # [-0.158655 0.345731 0.841345 1.39979]

            m = paddle.nn.GELU(True)
            out = m(x) # [-0.158808 0.345714 0.841192 1.39957]
    """

    def __init__(self, approximate=False, name=None):
        super(GELU, self).__init__()
        self._approximate = approximate
        self._name = name

    def forward(self, x):
        return F.gelu(x, self._approximate, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'approximate={}{}'.format(self._approximate, name_str)


class Hardshrink(layers.Layer):
    r"""
    Hardshrink Activation

    .. math::

        hardshrink(x)=
            \\left\\{
            \\begin{aligned}
            &x, & & if \\ x > threshold \\\\
            &x, & & if \\ x < -threshold \\\\
            &0, & & if \\ others
            \\end{aligned}
            \\right.

    Parameters:
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-1, 0.3, 2.5])
            m = paddle.nn.Hardshrink()
            out = m(x) # [-1., 0., 2.5]
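
            # Additional sketch: a larger threshold widens the band that is
            # zeroed out (expected values worked out by hand, not library output).
            m = paddle.nn.Hardshrink(threshold=1.0)
            out = m(x) # [0., 0., 2.5]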
    """

    def __init__(self, threshold=0.5, name=None):
        super(Hardshrink, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.hardshrink(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class Hardswish(layers.Layer):
    r"""
    Hardswish activation

    Hardswish was proposed in MobileNetV3, and offers better numerical stability
    and efficiency than the swish function. For more details, please refer
    to: https://arxiv.org/pdf/1905.02244.pdf

    .. math::

        Hardswish(x)=
            \\left\\{
            \\begin{aligned}
            &0, & & \\text{if } x \\leq -3 \\\\
            &x, & & \\text{if } x \\geq 3 \\\\
            &\\frac{x(x+3)}{6}, & & \\text{otherwise}
            \\end{aligned}
            \\right.

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-4., 5., 1.])
            m = paddle.nn.Hardswish()
            out = m(x) # [0., 5., 0.666667]
    """

    def __init__(self, name=None):
        super(Hardswish, self).__init__()
        self._name = name

    def forward(self, x):
        return F.hardswish(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Tanh(layers.Layer):
    r"""
    Tanh Activation.

    .. math::
        Tanh(x) = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Tanh()
            out = m(x)
            print(out)
            # [-0.37994896 -0.19737532  0.09966799  0.29131261]
    """

    def __init__(self, name=None):
        super(Tanh, self).__init__()
        self._name = name

    def forward(self, x):
        return F.tanh(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Hardtanh(layers.Layer):
    r"""
    Hardtanh Activation

    .. math::

        Hardtanh(x)= \\begin{cases}
                        max, \\text{if } x > max \\\\
                        min, \\text{if } x < min \\\\
                        x,  \\text{otherwise}
                      \\end{cases}

    Parameters:
        min (float, optional): The value of min for Hardtanh. Default is -1.
        max (float, optional): The value of max for Hardtanh. Default is 1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-1.5, 0.3, 2.5])
            m = paddle.nn.Hardtanh()
            out = m(x) # [-1., 0.3, 1.]
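
            # Additional sketch: custom clipping bounds via the min/max
            # parameters (expected values worked out by hand).
            m = paddle.nn.Hardtanh(min=-2.0, max=2.0)
            out = m(x) # [-1.5, 0.3, 2.]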
    """

    def __init__(self, min=-1.0, max=1.0, name=None):
        super(Hardtanh, self).__init__()
        self._min = min
        self._max = max
        self._name = name

    def forward(self, x):
        return F.hardtanh(x, self._min, self._max, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'min={}, max={}{}'.format(self._min, self._max, name_str)


class PReLU(layers.Layer):
    """
    PReLU Activation.

    .. math::

        PReLU(x) = max(0, x) + weight * min(0, x)

    Parameters:
        num_parameters (int, optional): Number of `weight` to learn. The supported values are:
            1 - a single parameter `alpha` is used for all input channels;
            Number of channels - a separate `alpha` is used for each input channel.
            Default is 1.
        init (float, optional): Init value of learnable `weight`. Default is 0.25.
        weight_attr (ParamAttr, optional): The parameter attribute for the learnable `weight`.
            Default is None. For more information, please refer to :ref:`api_paddle_ParamAttr`.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape. Default dtype is float32.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.set_default_dtype("float64")

            data = np.array([[[[-2.0,  3.0, -4.0,  5.0],
                            [ 3.0, -4.0,  5.0, -6.0],
                            [-7.0, -8.0,  8.0,  9.0]],
                            [[ 1.0, -2.0, -3.0,  4.0],
                            [-5.0,  6.0,  7.0, -8.0],
                            [ 6.0,  7.0,  8.0,  9.0]]]], 'float64')
            x = paddle.to_tensor(data)
            m = paddle.nn.PReLU(1, 0.25)
            out = m(x)
            # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
            #    [ 3.  , -1.  ,  5.  , -1.5 ],
            #    [-1.75, -2.  ,  8.  ,  9.  ]],
            #   [[ 1.  , -0.5 , -0.75,  4.  ],
            #    [-1.25,  6.  ,  7.  , -2.  ],
            #    [ 6.  ,  7.  ,  8.  ,  9.  ]]]]
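
            # Additional sketch: one weight per input channel. The input above
            # has 2 channels, so num_parameters=2; every weight starts at
            # `init`, so the initial output matches the single-weight case.
            m = paddle.nn.PReLU(num_parameters=2, init=0.25)
            out = m(x)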
    """

    def __init__(self, num_parameters=1, init=0.25, weight_attr=None,
                 name=None):
        super(PReLU, self).__init__()
        self._num_parameters = num_parameters
        self._init = init
        self._weight_attr = weight_attr
        self._name = name

        self._weight = self.create_parameter(
            attr=self._weight_attr,
            shape=[self._num_parameters],
            dtype=get_default_dtype(),
            is_bias=False,
            default_initializer=Constant(self._init))

    def forward(self, x):
        return F.prelu(x, self._weight)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'num_parameters={}, init={}, dtype={}{}'.format(
            self._num_parameters, self._init, self._dtype, name_str)


class ReLU(layers.Layer):
    """
    ReLU Activation.

    .. math::

        ReLU(x) = max(x, 0)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-2., 0., 1.])
            m = paddle.nn.ReLU()
            out = m(x) # [0., 0., 1.]
    """

    def __init__(self, name=None):
        super(ReLU, self).__init__()
        self._name = name

    def forward(self, x):
        return F.relu(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class ReLU6(layers.Layer):
    """
    ReLU6 Activation

    .. math::

        ReLU6(x) = min(max(0,x), 6)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
            m = paddle.nn.ReLU6()
            out = m(x) # [0, 0.3, 6]
    """

    def __init__(self, name=None):
        super(ReLU6, self).__init__()
        self._name = name

    def forward(self, x):
        return F.relu6(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class SELU(layers.Layer):
    r"""
    SELU Activation

    .. math::

        SELU(x)= scale *
                 \\begin{cases}
                   x, \\text{if } x > 0 \\\\
                   alpha * e^{x} - alpha, \\text{if } x <= 0
                 \\end{cases}

    Parameters:
        scale (float, optional): The value of scale (must be greater than 1.0) for SELU. Default is 1.0507009873554804934193349852946.
        alpha (float, optional): The value of alpha (must be no less than zero) for SELU. Default is 1.6732632423543772848170429916717.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
            m = paddle.nn.SELU()
            out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
    """

    def __init__(self,
                 scale=1.0507009873554804934193349852946,
                 alpha=1.6732632423543772848170429916717,
                 name=None):
        super(SELU, self).__init__()
        self._scale = scale
        self._alpha = alpha
        self._name = name

    def forward(self, x):
        return F.selu(x, self._scale, self._alpha, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'scale={:.16f}, alpha={:.16f}{}'.format(self._scale, self._alpha,
                                                       name_str)


class LeakyReLU(layers.Layer):
    r"""
    Leaky ReLU Activation.

    .. math::

        LeakyReLU(x)=
            \\left\\{
            \\begin{aligned}
            &x, & & if \\ x >= 0 \\\\
            &negative\_slope * x, & & otherwise \\\\
            \\end{aligned}
            \\right. \\\\

    Parameters:
        negative_slope (float, optional): Slope of the activation function at
            :math:`x < 0` . Default is 0.01.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            m = paddle.nn.LeakyReLU()
            x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
            out = m(x)  # [-0.02, 0., 1.]
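
            # Additional sketch: a custom negative_slope (expected values
            # worked out by hand).
            m = paddle.nn.LeakyReLU(negative_slope=0.5)
            out = m(x)  # [-1., 0., 1.]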
    """

    def __init__(self, negative_slope=0.01, name=None):
        super(LeakyReLU, self).__init__()
        self._negative_slope = negative_slope
        self._name = name

    def forward(self, x):
        return F.leaky_relu(x, self._negative_slope, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'negative_slope={}{}'.format(self._negative_slope, name_str)


class Sigmoid(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Sigmoid`` class. This layer calculates the `sigmoid` of input x.

    .. math::

        Sigmoid(x) = \\frac{1}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        x: N-D tensor, available dtype is float16, float32, float64.

    Returns:
        A callable object of Sigmoid.

    Examples:

        .. code-block:: python

          import paddle

          m = paddle.nn.Sigmoid()
          x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
          out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
    """

    def __init__(self, name=None):
        super(Sigmoid, self).__init__()
        self.name = name

    def forward(self, x):
        return F.sigmoid(x, self.name)

    def extra_repr(self):
        name_str = 'name={}'.format(self.name) if self.name else ''
        return name_str


class Hardsigmoid(layers.Layer):
    r"""
    This interface is used to construct a callable object of the ``Hardsigmoid`` class.
    This layer calculates the `hardsigmoid` of input x.

    A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
    which is much faster than sigmoid.

    .. math::

        Hardsigmoid(x)=
            \\left\\{
            \\begin{aligned}
            &0, & & \\text{if } x \\leq -3 \\\\
            &1, & & \\text{if } x \\geq 3 \\\\
            &x/6 + 1/2, & & \\text{otherwise}
            \\end{aligned}
            \\right.

    Parameters:
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        x: N-D tensor, available dtype is float32, float64.

    Returns:
        A callable object of Hardsigmoid.

    Examples:

        .. code-block:: python

          import paddle

          m = paddle.nn.Hardsigmoid()
          x = paddle.to_tensor([-4., 5., 1.])
          out = m(x) # [0., 1, 0.666667]
    """

    def __init__(self, name=None):
        super(Hardsigmoid, self).__init__()
        self.name = name

    def forward(self, x):
        return F.hardsigmoid(x, name=self.name)

    def extra_repr(self):
        name_str = 'name={}'.format(self.name) if self.name else ''
        return name_str


class Softplus(layers.Layer):
    r"""
    Softplus Activation

    .. math::

        Softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
        \\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}

    Parameters:
        beta (float, optional): The value of beta for Softplus. Default is 1.
        threshold (float, optional): The value of threshold for Softplus. Default is 20.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Softplus()
            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
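
            # Additional sketch: a larger beta makes Softplus approach ReLU
            # more closely (output values omitted here).
            m = paddle.nn.Softplus(beta=2)
            out = m(x)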
    """

    def __init__(self, beta=1, threshold=20, name=None):
        super(Softplus, self).__init__()
        self._beta = beta
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.softplus(x, self._beta, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'beta={}, threshold={}{}'.format(self._beta, self._threshold,
                                                name_str)


class Softshrink(layers.Layer):
    r"""
    Softshrink Activation

    .. math::

        Softshrink(x)= \\begin{cases}
                        x - threshold, \\text{if } x > threshold \\\\
                        x + threshold, \\text{if } x < -threshold \\\\
                        0,  \\text{otherwise}
                      \\end{cases}

    Parameters:
        threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
            m = paddle.nn.Softshrink()
            out = m(x) # [-0.4, 0, 0, 0.3]
    """

    def __init__(self, threshold=0.5, name=None):
        super(Softshrink, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.softshrink(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class Softsign(layers.Layer):
    r"""
    Softsign Activation

    .. math::

        Softsign(x) = \\frac{x}{1 + |x|}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Softsign()
            out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
    """

    def __init__(self, name=None):
        super(Softsign, self).__init__()
        self._name = name

    def forward(self, x):
        return F.softsign(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Swish(layers.Layer):
    r"""
    Swish Activation.

    .. math::

        Swish(x) = \\frac{x}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-2., 0., 1.]))
            m = paddle.nn.Swish()
            out = m(x) # [-0.238406, 0., 0.731059]
    """

    def __init__(self, name=None):
        super(Swish, self).__init__()
        self._name = name

    def forward(self, x):
        return F.swish(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str

class Tanhshrink(layers.Layer):
    """
    Tanhshrink Activation

    .. math::

        Tanhshrink(x) = x - tanh(x)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Tanhshrink()
            out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
    """

    def __init__(self, name=None):
        super(Tanhshrink, self).__init__()
        self._name = name

    def forward(self, x):
        return F.tanhshrink(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class ThresholdedReLU(layers.Layer):
    r"""
    Thresholded ReLU Activation

    .. math::

        ThresholdedReLU(x) = \\begin{cases}
                               x, \\text{if } x > threshold \\\\
                               0, \\text{otherwise}
                              \\end{cases}

    Parameters:
        threshold (float, optional): The value of threshold for ThresholdedReLU. Default is 1.0
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([2., 0., 1.]))
            m = paddle.nn.ThresholdedReLU()
            out = m(x) # [2., 0., 0.]
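
            # Additional sketch: a smaller threshold lets more activations
            # pass through unchanged (expected values worked out by hand).
            m = paddle.nn.ThresholdedReLU(threshold=0.5)
            out = m(x) # [2., 0., 1.]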
    """

    def __init__(self, threshold=1.0, name=None):
        super(ThresholdedReLU, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.thresholded_relu(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class LogSigmoid(layers.Layer):
    r"""
    LogSigmoid Activation.

    .. math::

        LogSigmoid(x) = log \\frac{1}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape. Available dtype is float32, float64.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            m = paddle.nn.LogSigmoid()
            out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
    """

    def __init__(self, name=None):
        super(LogSigmoid, self).__init__()
        self._name = name

    def forward(self, x):
        return F.log_sigmoid(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Softmax(layers.Layer):
    r"""
    Softmax Activation.

    This operator implements the softmax layer. The calculation process is as follows:

    1. The dimension :attr:`axis` of ``x`` will be permuted to the last.

    2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
    dimension (row length) is the same as the dimension :attr:`axis` of ``x``,
    and the first dimension (column length) is the product of all other dimensions
    of ``x``. For each row of the matrix, the softmax operator squashes the
    K-dimensional(K is the width of the matrix, which is also the size of ``x``'s
    dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
    vector of real values in the range [0, 1] that add up to 1.

    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
    are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .

    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.

    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        Softmax[i, j] = \\frac{\\exp(x[i, j])}{\\sum_j(\\exp(x[i, j]))}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]
          Attrs:
            axis = 1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Parameters:
        axis (int, optional): The axis along which to perform softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[[2.0, 3.0, 4.0, 5.0],
                        [3.0, 4.0, 5.0, 6.0],
                        [7.0, 8.0, 8.0, 9.0]],
                        [[1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [6.0, 7.0, 8.0, 9.0]]], 'float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Softmax()
            out = m(x)
            # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
            # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
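
            # Additional sketch: softmax along axis 1 instead of the last
            # axis; the expected result is the "Case 2" output shown above.
            m = paddle.nn.Softmax(axis=1)
            out = m(x)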
    """

    def __init__(self, axis=-1, name=None):
        super(Softmax, self).__init__()
        self._axis = axis
        self._dtype = None
        self._name = name

    def forward(self, x):
        return F.softmax(x, self._axis, self._dtype, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'axis={}{}'.format(self._axis, name_str)


class LogSoftmax(layers.Layer):
    r"""
    This operator implements the log_softmax layer. The calculation process is as follows:

    .. math::

        \\begin{aligned}
        Out[i, j] &= log(softmax(x)) \\\\
        &= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))})
        \\end{aligned}

    Parameters:
        axis (int, optional): The axis along which to perform log_softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of the input Tensor. If ``axis`` < 0, it works the
            same way as :math:`axis + D` . Default is -1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = [[[-2.0, 3.0, -4.0, 5.0],
                  [3.0, -4.0, 5.0, -6.0],
                  [-7.0, -8.0, 8.0, 9.0]],
                 [[1.0, -2.0, -3.0, 4.0],
                  [-5.0, 6.0, 7.0, -8.0],
                  [6.0, 7.0, 8.0, 9.0]]]
            m = paddle.nn.LogSoftmax()
            x = paddle.to_tensor(x)
            out = m(x)
            # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
            #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
            #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
            #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
            #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
            #   [ -3.4401896   -2.4401896   -1.4401896   -0.44018966]]]
    """

    def __init__(self, axis=-1, name=None):
        super(LogSoftmax, self).__init__()
        self._axis = axis
        self._name = name

    def forward(self, x):
        return F.log_softmax(x, self._axis)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'axis={}{}'.format(self._axis, name_str)


class Maxout(layers.Layer):
    r"""
    Maxout Activation.

    Assume the input shape is (N, Ci, H, W) and the output shape is (N, Co, H, W).
    Then Co = Ci / groups, and the operator formula is as follows:

    .. math::

        &out_{si+j} = \max_{k} x_{gsi + sk + j} \\\\
        &g = groups \\\\
        &s = \\frac{input.size}{num\\_channels} \\\\
        &0 \\le i < \\frac{num\\_channels}{groups} \\\\
        &0 \\le j < s \\\\
        &0 \\le k < groups

    Parameters:
        groups (int): The group number of maxout. The number of input channels must be
            divisible by `groups`; every `groups` consecutive channels along the maxout
            axis are reduced to a single output channel by taking their maximum.
        axis (int, optional): The axis along which to perform maxout calculations.
            It should be 1 when data format is NCHW, be -1 or 3 when data format
            is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
            where D is the dimensions of ``x`` . Default is 1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([1, 2, 3, 4])
            # [[[[0.5002636  0.22272532 0.17402348 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.02879342 0.88725346 0.61093384 0.38833922]]
            #   [[0.5231306  0.03807496 0.91661984 0.15602879]
            #    [0.666127   0.616567   0.30741522 0.24044901]
            #    [0.7142536  0.7351477  0.31588817 0.23782359]]]]
            m = paddle.nn.Maxout(groups=2)
            out = m(x)
            # [[[[0.5231306  0.22272532 0.91661984 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
    """

    def __init__(self, groups, axis=1, name=None):
        super(Maxout, self).__init__()
        self._groups = groups
        self._axis = axis
        self._name = name

    def forward(self, x):
        return F.maxout(x, self._groups, self._axis, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'groups={}, axis={}{}'.format(self._groups, self._axis, name_str)