#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import os
from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code
from .. import core
from ..framework import convert_np_dtype_to_dtype_, Variable, in_dygraph_mode
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated
from paddle import _C_ops

__deprecated_func_name__ = {
    'tanh_shrink': 'tanhshrink',
    'logsigmoid': 'log_sigmoid'
}

__activations_noattr__ = [
    'sigmoid',
    'silu',
    'logsigmoid',
    'tanh_shrink',
    'softplus',
    'softsign',
    'tanh',
]

__unary_func__ = [
    'exp',
    'expm1',
    'atan',
    'sqrt',
    'rsqrt',
    'abs',
    'ceil',
    'floor',
    'cos',
    'tan',
    'acos',
    'sin',
    'sinh',
    'asin',
    'cosh',
    'round',
    'reciprocal',
    'square',
    'lgamma',
    'acosh',
    'asinh',
    'atanh',
]

__inplace_unary_func__ = [
    'exp_',
    'sqrt_',
    'rsqrt_',
    'ceil_',
    'floor_',
    'round_',
    'reciprocal_',
]

__all__ = []

for _OP in set(__all__):
    globals()[_OP] = generate_layer_fn(_OP)

# It is a hot fix in some unittest using:
#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
# e.g.: test_program_code.py, test_dist_train.py
globals()['_scale'] = generate_layer_fn('scale')

globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')

__all__ += __activations_noattr__
__all__ += __unary_func__
__all__ += __inplace_unary_func__

for _OP in set(__activations_noattr__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    _func = generate_activation_fn(_OP)
    _func = deprecated(since="2.0.0",
                       update_to="paddle.nn.functional.%s" % (_new_OP))(_func)
    globals()[_OP] = _func

for _OP in set(__unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    _func = generate_activation_fn(_OP)
    _func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(_func)
    globals()[_OP] = _func

for _OP in set(__inplace_unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    _func = generate_inplace_fn(_OP)
    _func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(_func)
    globals()[_OP] = _func
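
# A minimal sketch of what the loops above install (a hypothetical static-graph
# session; assumes the generated functions behave like their paddle 2.0
# counterparts):
#
#     import paddle
#     import paddle.fluid as fluid
#
#     paddle.enable_static()
#     x = fluid.data(name="x", shape=[None, 4], dtype="float32")
#     y = fluid.layers.tanh(x)  # emits a `tanh` op and warns to use paddle.tanh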

add_sample_code(
    globals()["sigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.sigmoid(x)
        print(out)
        # [0.40131234 0.450166   0.52497919 0.57444252]

""")

add_sample_code(
    globals()["silu"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
        out = F.silu(x)
        print(out)
        # [0.7310586, 1.7615942, 2.8577224, 3.9280552]

""")

add_sample_code(
    globals()["logsigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.log_sigmoid(x)
        print(out)
        # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]

""")

add_sample_code(
    globals()["exp"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.exp(x)
        print(out)
        # [0.67032005 0.81873075 1.10517092 1.34985881]

""")

add_sample_code(
    globals()["expm1"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.expm1(x)
        print(out)
        # [-0.32967997, -0.18126924,  0.10517092,  0.34985882]

""")

add_sample_code(
    globals()["tanh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tanh(x)
        print(out)
        # [-0.37994896 -0.19737532  0.09966799  0.29131261]

""")

add_sample_code(
    globals()["atan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.atan(x)
        print(out)
        # [-0.38050638 -0.19739556  0.09966865  0.29145679]

""")

add_sample_code(
    globals()["tanh_shrink"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.tanhshrink(x)
        print(out)
        # [-0.020051, -0.00262468, 0.000332005, 0.00868739]

""")

add_sample_code(
    globals()["sqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.sqrt(x)
        print(out)
        # [0.31622777 0.4472136  0.54772256 0.63245553]

""")

add_sample_code(
    globals()["rsqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.rsqrt(x)
        print(out)
        # [3.16227766 2.23606798 1.82574186 1.58113883]

""")

add_sample_code(
    globals()["abs"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.abs(x)
        print(out)
        # [0.4 0.2 0.1 0.3]

""")

add_sample_code(
    globals()["ceil"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.ceil(x)
        print(out)
        # [-0. -0.  1.  1.]

""")

add_sample_code(
    globals()["floor"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.floor(x)
        print(out)
        # [-1. -1.  0.  0.]

""")

add_sample_code(
    globals()["cos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cos(x)
        print(out)
        # [0.92106099 0.98006658 0.99500417 0.95533649]

""")

add_sample_code(
    globals()["tan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tan(x)
        print(out)
        # [-0.42279324, -0.20271005, 0.10033467, 0.30933627]

""")

add_sample_code(
    globals()["acos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.acos(x)
        print(out)
        # [1.98231317 1.77215425 1.47062891 1.26610367]

""")

add_sample_code(
    globals()["sin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sin(x)
        print(out)
        # [-0.38941834 -0.19866933  0.09983342  0.29552021]

""")

add_sample_code(
    globals()["asin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.asin(x)
        print(out)
        # [-0.41151685 -0.20135792  0.10016742  0.30469265]

""")

add_sample_code(
    globals()["cosh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cosh(x)
        print(out)
        # [1.08107237 1.02006676 1.00500417 1.04533851]

""")

add_sample_code(
    globals()["sinh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sinh(x)
        print(out)
        # [-0.41075233 -0.201336    0.10016675  0.30452029]

""")

add_sample_code(
    globals()["asinh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.asinh(x)
        print(out)
        # [-0.39003533, -0.19869010,  0.09983408,  0.29567307]

""")

add_sample_code(
    globals()["acosh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([1., 3., 4., 5.])
        out = paddle.acosh(x)
        print(out)
        # [0.        , 1.76274729, 2.06343699, 2.29243159]

""")

add_sample_code(
    globals()["atanh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.atanh(x)
        print(out)
        # [-0.42364895, -0.20273256,  0.10033535,  0.30951962]

""")

add_sample_code(
    globals()["round"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
        out = paddle.round(x)
        print(out)
        # [-1. -0.  1.  2.]

""")

add_sample_code(
    globals()["reciprocal"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.reciprocal(x)
        print(out)
        # [-2.5        -5.         10.          3.33333333]

""")

add_sample_code(
    globals()["square"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.square(x)
        print(out)
        # [0.16 0.04 0.01 0.09]

""")

add_sample_code(
    globals()["lgamma"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.lgamma(x)
        print(out)
        # [1.31452441, 1.76149750, 2.25271273, 1.09579802]

""")

add_sample_code(
    globals()["softplus"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softplus(x)
        print(out)
        # [0.513015, 0.598139, 0.744397, 0.854355]

""")

add_sample_code(
    globals()["softsign"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softsign(x)
        print(out)
        # [-0.285714, -0.166667, 0.0909091, 0.230769]

""")

__all__ += ['softshrink']

_softshrink_ = generate_layer_fn('softshrink')


def softshrink(x, alpha=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softshrink')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            if name == 'alpha':
                kwargs['lambda'] = val
            else:
                kwargs[name] = val
    return _softshrink_(**kwargs)
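
# Note: `lambda` is a reserved word in Python, so the public argument is named
# `alpha` and is remapped to the op attribute `lambda` in the loop above.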


softshrink.__doc__ = r"""
	:alias_main: paddle.nn.functional.softshrink
	:alias: paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink
	:old_api: paddle.fluid.layers.softshrink

:strong:`Softshrink Activation Operator`

..  math::
    out = \\begin{cases}
            x - \\alpha, \\text{if } x > \\alpha \\\\
            x + \\alpha, \\text{if } x < -\\alpha \\\\
            0,  \\text{otherwise}
          \\end{cases}


Args:
    x: Input of the Softshrink operator, an N-D Tensor with data type float16, float32 or float64.
    alpha (float): Non-negative offset.

Returns:
    Output of the Softshrink operator, with the same data type as the input.

Examples:
    .. code-block:: python
    
        import paddle.fluid as fluid
        data = fluid.data(name="input", shape=[None, 784])
        result = fluid.layers.softshrink(x=data, alpha=0.3)
"""

__all__ += ['hard_shrink']

_hard_shrink_ = generate_layer_fn('hard_shrink')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.hardshrink")
def hard_shrink(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_shrink')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _hard_shrink_(**kwargs)


hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> import paddle.fluid as fluid
    >>> data = fluid.layers.data(name="input", shape=[784])
    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
"""

__all__ += ['cumsum']

_cum_sum_ = generate_layer_fn('cumsum')


@deprecated(since="2.0.0",
            update_to="paddle.cumsum",
            reason="New APIs for Paddle 2.0 are coming.")
def cumsum(x, axis=None, exclusive=None, reverse=None):
    check_type(x, 'x', (Variable), 'cumsum')
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _cum_sum_(**kwargs)


cumsum.__doc__ = """
	:alias_main: paddle.cumsum
	:alias: paddle.cumsum,paddle.tensor.cumsum,paddle.tensor.math.cumsum
	:old_api: paddle.fluid.layers.cumsum

The cumulative sum of the elements along a given axis. By default, the first element of the result is the same as the first element of the input. If exclusive is True, the first element of the result is 0.

Args:
    x (Variable): Input of the cumsum operator, the Tensor/LoDTensor to be accumulated.
    axis (int, optional): The dimension to accumulate along. -1 means the last dimension. Default is -1.
    exclusive (bool, optional): Whether to perform exclusive cumsum. Default is False.
    reverse (bool, optional): If True, the cumsum is performed in the reversed direction. Default is False.

Returns:
    Variable(Tensor/LoDTensor): The output of the cumsum operator.

Examples:
    .. code-block:: python

        import paddle.fluid as fluid
        data = fluid.layers.data(name="input", shape=[32, 784])
        result = fluid.layers.cumsum(data, axis=0)
"""

__all__ += ['thresholded_relu']

_thresholded_relu_ = generate_layer_fn('thresholded_relu')


def thresholded_relu(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'thresholded_relu')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val

    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = r"""
	:alias_main: paddle.nn.functional.thresholded_relu
	:alias: paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu
	:old_api: paddle.fluid.layers.thresholded_relu

:strong:`Thresholded ReLU Activation Operator`

Equation:
    ..  math::
        out = \\begin{cases}
            x, &if x > threshold \\\\
            0, &otherwise
            \\end{cases}

Args:
    x(Variable): The input of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64.
        
    threshold(float, optional): The threshold value. Note that if the arg `threshold` is not set, the threshold in the equation is 1.0.

Returns:

    Variable: The output of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.thresholded_relu(x, threshold=0.1)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.thresholded_relu(x, threshold=0.1)
            y_np = y.numpy()
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)
"""

__all__ += ['gelu']

_gelu_ = generate_layer_fn('gelu')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.gelu")
def gelu(x, approximate=False):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _gelu_(**kwargs)


gelu.__doc__ = r"""
:strong:`GeLU Activation Operator`
For more details, see [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415).

Equation:
    if approximate is True
    ..  math::
        out = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))

    else
    ..  math::
        out = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))

Args:

    x(Variable): The input of GeLU op, Tensor or LoDTensor, dtype: float32 or float64.

Returns:

    Variable: The output of GeLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.gelu(x)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #         [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.gelu(x)
            y_np = y.numpy()
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
"""

__all__ += ['erf']

_erf_ = generate_layer_fn('erf')


def erf(x, name=None):
    # In dynamic graph mode, call the generated C++ op directly.
    if in_dygraph_mode():
        return _C_ops.final_state_erf(x)

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _erf_(**kwargs)


erf.__doc__ = r"""
:strong:`Erf Operator`
For more details, see [Error function](https://en.wikipedia.org/wiki/Error_function).

Equation:
    ..  math::
        out = \\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x}e^{- \\eta^{2}}d\\eta

Args:

    x (Tensor): The input tensor, it's data type should be float32, float64.

Returns:

    Tensor: The output of Erf op, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        import paddle
        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.erf(x)
        print(out)
        # [-0.42839236 -0.22270259  0.11246292  0.32862676]
"""