#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define activation functions of neural network

from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...framework import ParamAttr
from ..initializer import Constant
from paddle.framework import get_default_dtype
from .. import functional as F
from paddle.nn import Layer

__all__ = []


class CELU(Layer):
    r"""
    CELU Activation.

    .. math::
    
        CELU(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))

    Parameters:
        alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            
            x = paddle.to_tensor([[-1. ,6.], [1., 15.6]])
            m = paddle.nn.CELU(0.2)
            out = m(x)
            # [[-0.19865242,  6.        ],
            #  [ 1.        , 15.60000038]]
    """

    def __init__(self, alpha=1.0, name=None):
        super(CELU, self).__init__()
        self._alpha = alpha
        self._name = name

    def forward(self, x):
        return F.celu(x, self._alpha, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'alpha={}{}'.format(self._alpha, name_str)


class ELU(Layer):
    r"""
    ELU Activation.

    .. math::

        ELU(x) = max(0, x) + min(0, \alpha * (e^{x}-1))

    Parameters:
        alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[-1. ,6.], [1., 15.6]])
            m = paddle.nn.ELU(0.2)
            out = m(x)
            # [[-0.12642411  6.        ]
            #  [ 1.          15.6      ]]
    """

    def __init__(self, alpha=1.0, name=None):
        super(ELU, self).__init__()
        self._alpha = alpha
        self._name = name

    def forward(self, x):
        return F.elu(x, self._alpha, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'alpha={}{}'.format(self._alpha, name_str)


class GELU(Layer):
    r"""
    GELU Activation.

    If approximate is True

    .. math::

        GELU(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))

    else

    .. math::

        GELU(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))

    Parameters:
        approximate (bool, optional): Whether to enable approximation. Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))

            m = paddle.nn.GELU()
            out = m(x) # [-0.158655 0.345731 0.841345 1.39979]

            m = paddle.nn.GELU(True)
            out = m(x) # [-0.158808 0.345714 0.841192 1.39957]
    """

    def __init__(self, approximate=False, name=None):
        super(GELU, self).__init__()
        self._approximate = approximate
        self._name = name

    def forward(self, x):
        return F.gelu(x, self._approximate, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'approximate={}{}'.format(self._approximate, name_str)


class Hardshrink(Layer):
    r"""
    Hardshrink Activation

    .. math::

        hardshrink(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x > threshold \\
                    x, & & if \ x < -threshold \\
                    0, & & otherwise
            \end{array}
            \right.

    Parameters:
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-1, 0.3, 2.5])
            m = paddle.nn.Hardshrink()
            out = m(x) # [-1., 0., 2.5]
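
            # A hedged extra example: with a larger threshold, more values are
            # zeroed out (expected values follow from the piecewise definition
            # above).
            m2 = paddle.nn.Hardshrink(threshold=1.0)
            out2 = m2(x) # [0., 0., 2.5]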
    """

    def __init__(self, threshold=0.5, name=None):
        super(Hardshrink, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.hardshrink(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class Hardswish(Layer):
    r"""
    Hardswish activation

    Hardswish is proposed in MobileNetV3, and performs better than swish in
    computational stability and efficiency. For more details please refer
    to: https://arxiv.org/pdf/1905.02244.pdf

    .. math::

        Hardswish(x)=
            \left\{
                \begin{array}{cll}
                0 &, & \text{if } x \leq -3 \\
                x &, & \text{if } x \geq 3 \\
                \frac{x(x+3)}{6} &, & \text{otherwise}
                \end{array}
            \right.
            

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-4., 5., 1.])
            m = paddle.nn.Hardswish()
            out = m(x) # [0., 5., 0.666667]
    """

    def __init__(self, name=None):
        super(Hardswish, self).__init__()
        self._name = name

    def forward(self, x):
        return F.hardswish(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Tanh(Layer):
    r"""
    Tanh Activation.

    .. math::
        Tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Tanh()
            out = m(x)
            print(out)
            # [-0.37994896 -0.19737532  0.09966799  0.29131261]
    """

    def __init__(self, name=None):
        super(Tanh, self).__init__()
        self._name = name

    def forward(self, x):
        return F.tanh(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Hardtanh(Layer):
    r"""
    Hardtanh Activation

    .. math::

        Hardtanh(x)=
            \left\{
                \begin{array}{cll}
                    max,& & \text{if } x > max \\
                    min,& & \text{if } x < min \\
                    x,& & \text{otherwise}
                \end{array}
            \right.


    Parameters:
        min (float, optional): The value of min for Hardtanh. Default is -1.
        max (float, optional): The value of max for Hardtanh. Default is 1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-1.5, 0.3, 2.5])
            m = paddle.nn.Hardtanh()
            out = m(x) # [-1., 0.3, 1.]
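
            # The clipping bounds are adjustable; expected values follow from
            # the formula above (a sketch with custom min/max):
            m2 = paddle.nn.Hardtanh(min=-0.5, max=0.5)
            out2 = m2(x) # [-0.5, 0.3, 0.5]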
    """

    def __init__(self, min=-1.0, max=1.0, name=None):
        super(Hardtanh, self).__init__()
        self._min = min
        self._max = max
        self._name = name

    def forward(self, x):
        return F.hardtanh(x, self._min, self._max, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'min={}, max={}{}'.format(self._min, self._max, name_str)


class PReLU(Layer):
    """
    PReLU Activation.

    .. math::

        PReLU(x) = max(0, x) + weight * min(0, x)

    Parameters:
        num_parameters (int, optional): Number of `weight` to learn. The supported values are:
            1 - a single parameter `alpha` is used for all input channels;
            Number of channels - a separate `alpha` is used for each input channel.
            Default is 1.
        init (float, optional): Init value of learnable `weight`. Default is 0.25.
        weight_attr(ParamAttr, optional): The parameter attribute for the learnable `weight`.
            Default is None. For more information, please refer to :ref:`api_paddle_ParamAttr`.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape. Default dtype is float32.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.set_default_dtype("float64")

            data = np.array([[[[-2.0,  3.0, -4.0,  5.0],
                            [ 3.0, -4.0,  5.0, -6.0],
                            [-7.0, -8.0,  8.0,  9.0]],
                            [[ 1.0, -2.0, -3.0,  4.0],
                            [-5.0,  6.0,  7.0, -8.0],
                            [ 6.0,  7.0,  8.0,  9.0]]]], 'float64')
            x = paddle.to_tensor(data)
            m = paddle.nn.PReLU(1, 0.25)
            out = m(x)
            # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
            #    [ 3.  , -1.  ,  5.  , -1.5 ],
            #    [-1.75, -2.  ,  8.  ,  9.  ]],
            #   [[ 1.  , -0.5 , -0.75,  4.  ],
            #    [-1.25,  6.  ,  7.  , -2.  ],
            #    [ 6.  ,  7.  ,  8.  ,  9.  ]]]]
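
            # A per-channel sketch: with num_parameters equal to the channel
            # count (2 for the NCHW input above), a separate alpha is learned
            # for each channel; with init=0.25 the initial output matches the
            # single-parameter case.
            m2 = paddle.nn.PReLU(num_parameters=2, init=0.25)
            out2 = m2(x)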
    """

    def __init__(self, num_parameters=1, init=0.25, weight_attr=None,
                 name=None):
        super(PReLU, self).__init__()
        self._num_parameters = num_parameters
        self._init = init
        self._weight_attr = weight_attr
        self._name = name

        self._weight = self.create_parameter(
            attr=self._weight_attr,
            shape=[self._num_parameters],
            dtype=get_default_dtype(),
            is_bias=False,
            default_initializer=Constant(self._init))

    def forward(self, x):
        return F.prelu(x, self._weight)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'num_parameters={}, init={}, dtype={}{}'.format(
            self._num_parameters, self._init, self._dtype, name_str)


class ReLU(Layer):
    """
    ReLU Activation.

    .. math::

        ReLU(x) = max(x, 0)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-2., 0., 1.])
            m = paddle.nn.ReLU()
            out = m(x) # [0., 0., 1.]
    """

    def __init__(self, name=None):
        super(ReLU, self).__init__()
        self._name = name

    def forward(self, x):
        return F.relu(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class ReLU6(Layer):
    """
    ReLU6 Activation

    .. math::

        ReLU6(x) = min(max(0,x), 6)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
            m = paddle.nn.ReLU6()
            out = m(x) # [0, 0.3, 6]
    """

    def __init__(self, name=None):
        super(ReLU6, self).__init__()
        self._name = name

    def forward(self, x):
        return F.relu6(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class SELU(Layer):
    r"""
    SELU Activation

    .. math::

        SELU(x)= scale *
            \left\{
                \begin{array}{lcl}
                x,& &\text{if } \ x > 0 \\
                alpha * e^{x} - alpha,& &\text{if } \ x <= 0
                \end{array}
            \right.

    Parameters:
        scale (float, optional): The value of scale (must be greater than 1.0) for SELU. Default is 1.0507009873554804934193349852946
        alpha (float, optional): The value of alpha (must be no less than zero) for SELU. Default is 1.6732632423543772848170429916717
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
            m = paddle.nn.SELU()
            out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
    """

    def __init__(self,
                 scale=1.0507009873554804934193349852946,
                 alpha=1.6732632423543772848170429916717,
                 name=None):
        super(SELU, self).__init__()
        self._scale = scale
        self._alpha = alpha
        self._name = name

    def forward(self, x):
        return F.selu(x, self._scale, self._alpha, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'scale={:.16f}, alpha={:.16f}{}'.format(self._scale, self._alpha,
                                                       name_str)


class LeakyReLU(Layer):
    r"""
    Leaky ReLU Activation.

    .. math::

        LeakyReLU(x)=
            \left\{
                \begin{array}{rcl}
                    x, & & if \ x >= 0 \\
                    negative\_slope * x, & & otherwise \\
                \end{array}
            \right.

    Parameters:
        negative_slope (float, optional): Slope of the activation function at
            :math:`x < 0` . Default is 0.01.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            m = paddle.nn.LeakyReLU()
            x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
            out = m(x)  # [-0.02, 0., 1.]
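
            # negative_slope scales the negative inputs; expected values follow
            # from the formula above (a sketch):
            m2 = paddle.nn.LeakyReLU(negative_slope=0.1)
            out2 = m2(x)  # [-0.2, 0., 1.]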
    """

    def __init__(self, negative_slope=0.01, name=None):
        super(LeakyReLU, self).__init__()
        self._negative_slope = negative_slope
        self._name = name

    def forward(self, x):
        return F.leaky_relu(x, self._negative_slope, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'negative_slope={}{}'.format(self._negative_slope, name_str)


class Sigmoid(Layer):
    """
    This interface is used to construct a callable object of the ``Sigmoid`` class. This layer calculates the `sigmoid` of input x.

    .. math::

        Sigmoid(x) = \\frac{1}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        x: N-D tensor, available dtype is float16, float32, float64.

    Returns:
        A callable object of Sigmoid.

    Examples:

        .. code-block:: python

          import paddle

          m = paddle.nn.Sigmoid()
          x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
          out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
    """

    def __init__(self, name=None):
        super(Sigmoid, self).__init__()
        self.name = name

    def forward(self, x):
        return F.sigmoid(x, self.name)

    def extra_repr(self):
        name_str = 'name={}'.format(self.name) if self.name else ''
        return name_str


class Hardsigmoid(Layer):
    r"""
    This interface is used to construct a callable object of the ``Hardsigmoid`` class.
    This layer calculates the `hardsigmoid` of input x.

    A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
    which is much faster than sigmoid.

    .. math::

        Hardsigmoid(x)=
            \left\{
                \begin{array}{rcl}
            0, & & \text{if } \ x \leq -3 \\
            1, & & \text{if } \ x \geq 3 \\
            x/6 + 1/2, & & \text{otherwise}
                \end{array}
            \right.

665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680

    Parameters:
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        x: N-D tensor, available dtype is float32, float64.

    Returns:
        A callable object of Hardsigmoid.

    Examples:

        .. code-block:: python

          import paddle

          m = paddle.nn.Hardsigmoid()
          x = paddle.to_tensor([-4., 5., 1.])
          out = m(x) # [0., 1, 0.666667]
    """

    def __init__(self, name=None):
        super(Hardsigmoid, self).__init__()
        self.name = name

    def forward(self, x):
        return F.hardsigmoid(x, name=self.name)

    def extra_repr(self):
        name_str = 'name={}'.format(self.name) if self.name else ''
        return name_str


class Softplus(Layer):
    r"""
    Softplus Activation

    .. math::

        Softplus(x) = \frac{1}{\beta} * \log(1 + e^{\beta * x}) \\
        \text{For numerical stability, the implementation reverts to the linear function when: } \beta * x > threshold.

    Parameters:
        beta (float, optional): The value of beta for Softplus. Default is 1
        threshold (float, optional): The value of threshold for Softplus. Default is 20
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Softplus()
            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
    """

    def __init__(self, beta=1, threshold=20, name=None):
        super(Softplus, self).__init__()
        self._beta = beta
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.softplus(x, self._beta, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'beta={}, threshold={}{}'.format(self._beta, self._threshold,
                                                name_str)


class Softshrink(Layer):
    r"""
    Softshrink Activation

    .. math::

        Softshrink(x)=
            \left\{
                \begin{array}{rcl}
                x - threshold,& & \text{if } x > threshold \\
                x + threshold,& & \text{if } x < -threshold \\
                0,& &  \text{otherwise}
            \end{array}
            \right.


    Parameters:
        threshold (float, optional): The value of threshold (must be no less than zero) for Softshrink. Default is 0.5
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
            m = paddle.nn.Softshrink()
            out = m(x) # [-0.4, 0, 0, 0.3]
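
            # A smaller threshold shrinks by less; expected values follow from
            # the piecewise definition above (a sketch):
            m2 = paddle.nn.Softshrink(threshold=0.3)
            out2 = m2(x) # [-0.6, 0, 0, 0.5]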
    """

    def __init__(self, threshold=0.5, name=None):
        super(Softshrink, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.softshrink(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class Softsign(Layer):
    r"""
    Softsign Activation

    .. math::

        Softsign(x) = \frac{x}{1 + |x|}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Softsign()
            out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
    """

    def __init__(self, name=None):
        super(Softsign, self).__init__()
        self._name = name

    def forward(self, x):
        return F.softsign(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Swish(Layer):
    r"""
    Swish Activation.

    .. math::

        Swish(x) = \frac{x}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-2., 0., 1.]))
            m = paddle.nn.Swish()
            out = m(x) # [-0.238406, 0., 0.731059]
    """

    def __init__(self, name=None):
        super(Swish, self).__init__()
        self._name = name

    def forward(self, x):
        return F.swish(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class Tanhshrink(Layer):
    """
    Tanhshrink Activation

    .. math::

        Tanhshrink(x) = x - tanh(x)

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Tanhshrink()
            out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
    """

    def __init__(self, name=None):
        super(Tanhshrink, self).__init__()
        self._name = name

    def forward(self, x):
        return F.tanhshrink(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class ThresholdedReLU(Layer):
    r"""
    Thresholded ReLU Activation

    .. math::

        ThresholdedReLU(x) =
            \left\{
                \begin{array}{rl}
                x,& \text{if } \ x > threshold \\
                0,& \text{otherwise}
                \end{array}
            \right.


    Parameters:
        threshold (float, optional): The value of threshold for ThresholdedReLU. Default is 1.0
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([2., 0., 1.]))
            m = paddle.nn.ThresholdedReLU()
            out = m(x) # [2., 0., 0.]
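
            # Lowering the threshold keeps more activations; expected values
            # follow from the formula above (a sketch):
            m2 = paddle.nn.ThresholdedReLU(threshold=0.5)
            out2 = m2(x) # [2., 0., 1.]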
    """

    def __init__(self, threshold=1.0, name=None):
        super(ThresholdedReLU, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return F.thresholded_relu(x, self._threshold, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'threshold={}{}'.format(self._threshold, name_str)


class Silu(Layer):
    r"""
    Silu Activation.

    .. math::

        Silu(x) = \frac{x}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape. The data type should be float32 or float64.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            m = paddle.nn.Silu()
            out = m(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
    """

    def __init__(self, name=None):
        super(Silu, self).__init__()
        self._name = name

    def forward(self, x):
        return F.silu(x, self._name)

    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str


class LogSigmoid(Layer):
    r"""
    LogSigmoid Activation.

    .. math::

        LogSigmoid(x) = \log \frac{1}{1 + e^{-x}}

    Parameters:
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape. The data type should be float32 or float64.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

1015
            import paddle
1016

1017
            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
1018 1019
            m = paddle.nn.LogSigmoid()
            out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
1020 1021 1022 1023 1024 1025 1026
    """

    def __init__(self, name=None):
        super(LogSigmoid, self).__init__()
        self._name = name

    def forward(self, x):
1027
        return F.log_sigmoid(x, self._name)
1028

1029 1030 1031 1032
    def extra_repr(self):
        name_str = 'name={}'.format(self._name) if self._name else ''
        return name_str

1033

Z
zhiboniu 已提交
1034
class Softmax(Layer):
1035
    r"""
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062
    Softmax Activation.

    This operator implements the softmax layer. The calculation process is as follows:

    1. The dimension :attr:`axis` of ``x`` will be permuted to the last.

    2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
    dimension (row length) is the same as the dimension :attr:`axis` of ``x``,
    and the first dimension (column length) is the product of all other dimensions
    of ``x``. For each row of the matrix, the softmax operator squashes the
    K-dimensional (K is the width of the matrix, which is also the size of ``x``'s
    dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
    vector of real values in the range [0, 1] that add up to 1.

    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
    are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .

    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.

    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(\exp(x[i, j]))}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            x.shape = [2, 3, 4]
            x.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]
          Attrs:
            axis = 1

          Output:
            out.shape = [2, 3, 4]
            out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Parameters:
        axis (int, optional): The axis along which to perform softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[[2.0, 3.0, 4.0, 5.0],
                        [3.0, 4.0, 5.0, 6.0],
                        [7.0, 8.0, 8.0, 9.0]],
                        [[1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [6.0, 7.0, 8.0, 9.0]]], 'float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Softmax()
            out = m(x)
            # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
            # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
            #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
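
            # Passing axis selects the dimension to normalize over; this sketch
            # uses axis=1, and the expected values match Case 2 in the text
            # example above.
            m2 = paddle.nn.Softmax(axis=1)
            out2 = m2(x)
            # [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
            #   [0.01786798, 0.01786798, 0.04661262, 0.04661262],
            #   [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
            #  [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
            #   [0.26762315, 0.26762315, 0.26762315, 0.26762315],
            #   [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]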
    """

    def __init__(self, axis=-1, name=None):
        super(Softmax, self).__init__()
        self._axis = axis
        self._dtype = None
        self._name = name

    def forward(self, x):
        return F.softmax(x, self._axis, self._dtype, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'axis={}{}'.format(self._axis, name_str)


class LogSoftmax(Layer):
    r"""
    This operator implements the log_softmax layer. The calculation process is as follows:

    .. math::

        \begin{array} {rcl}
            Out[i, j] &= &\log(softmax(x)) \\
            &= &\log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j]))})
        \end{array}

    Parameters:
        axis (int, optional): The axis along which to perform log_softmax
            calculations. It should be in range [-D, D), where D is the
            dimensions of the input Tensor . If ``axis`` < 0, it works the
            same way as :math:`axis + D` . Default is -1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle

            x = [[[-2.0, 3.0, -4.0, 5.0],
                  [3.0, -4.0, 5.0, -6.0],
                  [-7.0, -8.0, 8.0, 9.0]],
                 [[1.0, -2.0, -3.0, 4.0],
                  [-5.0, 6.0, 7.0, -8.0],
                  [6.0, 7.0, 8.0, 9.0]]]
            m = paddle.nn.LogSoftmax()
            x = paddle.to_tensor(x)
            out = m(x)
            # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
            #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
            #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
            #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
            #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
            #   [ -3.4401896   -2.4401896   -1.4401896   -0.44018966]]]
    """

    def __init__(self, axis=-1, name=None):
        super(LogSoftmax, self).__init__()
        self._axis = axis
        self._name = name

    def forward(self, x):
        return F.log_softmax(x, self._axis, name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'axis={}{}'.format(self._axis, name_str)


class Maxout(Layer):
    r"""
    Maxout Activation.

    Assume the input shape is (N, Ci, H, W) and the output shape is
    (N, Co, H, W); then :math:`Co = Ci / groups`, and the operator formula
    is as follows:

    .. math::

        \begin{array}{l}
            &out_{si+j} = \max_{k} x_{gsi + sk + j} \\
            &g = groups \\
            &s = \frac{input.size}{num\_channels} \\
            &0 \le i < \frac{num\_channels}{groups} \\
            &0 \le j < s \\
            &0 \le k < groups
        \end{array}

    Parameters:
        groups (int): The number of groups for maxout. The size of the channel
            dimension must be divisible by ``groups``.
        axis (int, optional): The axis along which to perform maxout calculations.
            It should be 1 when data format is NCHW, be -1 or 3 when data format
            is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
            where D is the dimensions of ``x`` . Default is 1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([1, 2, 3, 4])
            # [[[[0.5002636  0.22272532 0.17402348 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.02879342 0.88725346 0.61093384 0.38833922]]
            #   [[0.5231306  0.03807496 0.91661984 0.15602879]
            #    [0.666127   0.616567   0.30741522 0.24044901]
            #    [0.7142536  0.7351477  0.31588817 0.23782359]]]]
            m = paddle.nn.Maxout(groups=2)
            out = m(x)
            # [[[[0.5231306  0.22272532 0.91661984 0.2874594 ]
            #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
            #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
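
            # For NHWC data the channel dimension is last, so axis should be
            # -1 (or 3); the channel count shrinks to Ci/groups (a sketch):
            x_nhwc = paddle.rand([1, 3, 4, 2])
            m2 = paddle.nn.Maxout(groups=2, axis=-1)
            out2 = m2(x_nhwc)  # shape: [1, 3, 4, 1]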
    """

    def __init__(self, groups, axis=1, name=None):
        super(Maxout, self).__init__()
        self._groups = groups
        self._axis = axis
        self._name = name

    def forward(self, x):
        return F.maxout(x, self._groups, self._axis, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'groups={}, axis={}{}'.format(self._groups, self._axis, name_str)