#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import os
from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code
from .. import core
from ..framework import convert_np_dtype_to_dtype_, Variable
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated

__deprecated_func_name__ = {
    'tanh_shrink': 'tanhshrink',
    'logsigmoid': 'log_sigmoid'
}

__activations_noattr__ = [
    'sigmoid',
    'silu',
    'logsigmoid',
    'tanh_shrink',
    'softplus',
    'softsign',
    'tanh',
]

__unary_func__ = [
    'exp',
    'atan',
    'sqrt',
    'rsqrt',
    'abs',
    'ceil',
    'floor',
    'cos',
    'tan',
    'acos',
    'sin',
    'sinh',
    'asin',
    'cosh',
    'round',
    'reciprocal',
    'square',
    'lgamma',
]

__inplace_unary_func__ = [
    'exp_',
    'sqrt_',
    'rsqrt_',
    'ceil_',
    'floor_',
    'round_',
    'reciprocal_',
]
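# Note: the trailing underscore marks the in-place variants listed above; in
# imperative mode they overwrite their input Tensor instead of returning a
# new one.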

__all__ = []

for _OP in set(__all__):
    globals()[_OP] = generate_layer_fn(_OP)

# It is a hot fix for some unit tests that use:
#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
# e.g.: test_program_code.py, test_dist_train.py
globals()['_scale'] = generate_layer_fn('scale')

globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')

__all__ += __activations_noattr__
__all__ += __unary_func__
__all__ += __inplace_unary_func__

for _OP in set(__activations_noattr__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_activation_fn(_OP)
    func = deprecated(
        since="2.0.0", update_to="paddle.nn.functional.%s" % (_new_OP))(func)
    globals()[_OP] = func

for _OP in set(__unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_activation_fn(_OP)
    func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(func)
    globals()[_OP] = func

for _OP in set(__inplace_unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_inplace_fn(_OP)
    func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(func)
    globals()[_OP] = func
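
# A minimal usage sketch (illustrative only): the loops above register the
# generated functions in this module's globals, so they can be called like
# hand-written layers, e.g. in imperative mode:
#
#     import paddle
#     x = paddle.to_tensor([1.0, 2.0])
#     y = paddle.exp(x)    # out-of-place, returns a new Tensor
#     paddle.exp_(x)       # in-place variant, overwrites x with exp(x)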

add_sample_code(globals()["sigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.sigmoid(x)
        print(out)
        # [0.40131234 0.450166   0.52497919 0.57444252]

""")

add_sample_code(globals()["silu"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
        out = F.silu(x)
        print(out)
        # [0.7310586 1.7615942 2.8577224 3.9280552]

""")

add_sample_code(globals()["logsigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.log_sigmoid(x)
        print(out)
        # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]

""")

add_sample_code(globals()["exp"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.exp(x)
        print(out)
        # [0.67032005 0.81873075 1.10517092 1.34985881]

""")

add_sample_code(globals()["tanh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tanh(x)
        print(out)
        # [-0.37994896 -0.19737532  0.09966799  0.29131261]

""")

add_sample_code(globals()["atan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.atan(x)
        print(out)
        # [-0.38050638 -0.19739556  0.09966865  0.29145679]

""")

add_sample_code(globals()["tanh_shrink"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.tanhshrink(x)
        print(out)
        # [-0.020051, -0.00262468, 0.000332005, 0.00868739]

""")

add_sample_code(globals()["sqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.sqrt(x)
        print(out)
        # [0.31622777 0.4472136  0.54772256 0.63245553]

""")

add_sample_code(globals()["rsqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.rsqrt(x)
        print(out)
        # [3.16227766 2.23606798 1.82574186 1.58113883]

""")

add_sample_code(globals()["abs"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.abs(x)
        print(out)
        # [0.4 0.2 0.1 0.3]

""")

add_sample_code(globals()["ceil"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.ceil(x)
        print(out)
        # [-0. -0.  1.  1.]

""")

add_sample_code(globals()["floor"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.floor(x)
        print(out)
        # [-1. -1.  0.  0.]

""")

add_sample_code(globals()["cos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cos(x)
        print(out)
        # [0.92106099 0.98006658 0.99500417 0.95533649]

""")

add_sample_code(globals()["tan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tan(x)
        print(out)
        # [-0.42279324, -0.20271005, 0.10033467, 0.30933627]

""")

add_sample_code(globals()["acos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.acos(x)
        print(out)
        # [1.98231317 1.77215425 1.47062891 1.26610367]

""")

add_sample_code(globals()["sin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sin(x)
        print(out)
        # [-0.38941834 -0.19866933  0.09983342  0.29552021]

""")

add_sample_code(globals()["asin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.asin(x)
        print(out)
        # [-0.41151685 -0.20135792  0.10016742  0.30469265]

""")

add_sample_code(globals()["cosh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cosh(x)
        print(out)
        # [1.08107237 1.02006676 1.00500417 1.04533851]

""")

add_sample_code(globals()["sinh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sinh(x)
        print(out)
        # [-0.41075233 -0.201336    0.10016675  0.30452029]

""")

add_sample_code(globals()["round"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
        out = paddle.round(x)
        print(out)
        # [-1. -0.  1.  2.]

""")

add_sample_code(globals()["reciprocal"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.reciprocal(x)
        print(out)
        # [-2.5        -5.         10.          3.33333333]

""")

add_sample_code(globals()["square"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.square(x)
        print(out)
        # [0.16 0.04 0.01 0.09]

""")

add_sample_code(globals()["lgamma"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.lgamma(x)
        print(out)
        # [1.31452441, 1.76149750, 2.25271273, 1.09579802]

""")

add_sample_code(globals()["softplus"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softplus(x)
        print(out)
        # [0.513015, 0.598139, 0.744397, 0.854355]

""")

add_sample_code(globals()["softsign"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softsign(x)
        print(out)
        # [-0.285714, -0.166667, 0.0909091, 0.230769]

""")

__all__ += ['softshrink']

_softshrink_ = generate_layer_fn('softshrink')


def softshrink(x, alpha=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softshrink')

    locals_var = locals().copy()
    kwargs = dict()
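    # Forward only the arguments that were explicitly set so the op's own
    # defaults apply otherwise; the Python-level name 'alpha' maps to the op
    # attribute 'lambda', which is a reserved word in Python and cannot be
    # used as a keyword argument.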
    for name, val in locals_var.items():
        if val is not None:
            if name == 'alpha':
                kwargs['lambda'] = val
            else:
                kwargs[name] = val
    return _softshrink_(**kwargs)


softshrink.__doc__ = r"""
	:alias_main: paddle.nn.functional.softshrink
	:alias: paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink
	:old_api: paddle.fluid.layers.softshrink

:strong:`Softshrink Activation Operator`

..  math::
    out = \\begin{cases}
            x - \\alpha, \\text{if } x > \\alpha \\\\
            x + \\alpha, \\text{if } x < -\\alpha \\\\
            0,  \\text{otherwise}
          \\end{cases}


Args:
    x (Variable): Input of Softshrink operator, an N-D Tensor, with data type float32, float64 or float16.
    alpha (float): non-negative offset
    
Returns:
    Output of Softshrink operator with the same type as the input.

Examples:
    .. code-block:: python
    
        import paddle.fluid as fluid
        data = fluid.data(name="input", shape=[None, 784])
        result = fluid.layers.softshrink(x=data, alpha=0.3)
"""

__all__ += ['hard_shrink']

_hard_shrink_ = generate_layer_fn('hard_shrink')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.hardshrink")
def hard_shrink(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_shrink')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _hard_shrink_(**kwargs)


hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> import paddle.fluid as fluid
    >>> data = fluid.layers.data(name="input", shape=[784])
    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
"""

__all__ += ['cumsum']

_cum_sum_ = generate_layer_fn('cumsum')


@deprecated(
    since="2.0.0",
    update_to="paddle.cumsum",
    reason="New APIs for Paddle 2.0 are coming.")
def cumsum(x, axis=None, exclusive=None, reverse=None):
    check_type(x, 'x', (Variable), 'cumsum')
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _cum_sum_(**kwargs)


cumsum.__doc__ = """
	:alias_main: paddle.cumsum
	:alias: paddle.cumsum,paddle.tensor.cumsum,paddle.tensor.math.cumsum
	:old_api: paddle.fluid.layers.cumsum

The cumulative sum of the elements along a given axis. By default, the first element of the result is the same as the first element of the input. If exclusive is true, the first element of the result is 0.

Args:
    x (Variable): Input of cumsum operator, the Tensor/LoDTensor to be accumulated.
    axis (int, optional): The dimension to accumulate along. -1 means the last dimension. Default is -1.
    exclusive (bool, optional): Whether to perform exclusive cumsum. Default is False.
    reverse (bool, optional): If true, the cumsum is performed in the reversed direction. Default is False.

Returns:
    Variable(Tensor/LoDTensor): The result of cumsum operator.

Examples:
    .. code-block:: python
        
        import paddle.fluid as fluid
        data = fluid.layers.data(name="input", shape=[32, 784])
        result = fluid.layers.cumsum(data, axis=0)
"""

__all__ += ['thresholded_relu']

_thresholded_relu_ = generate_layer_fn('thresholded_relu')


def thresholded_relu(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'thresholded_relu')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val

    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = r"""
	:alias_main: paddle.nn.functional.thresholded_relu
	:alias: paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu
	:old_api: paddle.fluid.layers.thresholded_relu

:strong:`Thresholded ReLU Activation Operator`

Equation:
    ..  math::
        out = \\begin{cases}
            x, &if x > threshold \\\\
            0, &otherwise
            \\end{cases}

Args:
    x(Variable): The input of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64.
        
    threshold(float, optional): The threshold value. If not set, 1.0 is used in the equation above.

Returns:

    Variable: The output of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.thresholded_relu(x, threshold=0.1)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.thresholded_relu(x, threshold=0.1)
            y_np = y.numpy()
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)
"""

__all__ += ['gelu']

_gelu_ = generate_layer_fn('gelu')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.gelu")
def gelu(x, approximate=False):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _gelu_(**kwargs)


gelu.__doc__ = r"""
:strong:`GeLU Activation Operator`
For more details, see `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`_.

Equation:
    if approximate is True
    ..  math::
        out = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))

    else
    ..  math::
        out = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))

Args:

    x(Variable): The input of GeLU op, Tensor or LoDTensor, dtype: float32 or float64.

Returns:

    Variable: The output of GeLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.gelu(x)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #         [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.gelu(x)
            y_np = y.numpy()
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
"""

__all__ += ['erf']

_erf_ = generate_layer_fn('erf')


def erf(x, name=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _erf_(**kwargs)


erf.__doc__ = r"""
:strong:`Erf Operator`
For more details, see `Error function <https://en.wikipedia.org/wiki/Error_function>`_.

Equation:
    ..  math::
        out = \\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x}e^{- \\eta^{2}}d\\eta

Args:

    x (Tensor): The input tensor, it's data type should be float32, float64.

Returns:

    Tensor: The output of Erf op, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        import paddle
        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.erf(x)
        print(out)
        # [-0.42839236 -0.22270259  0.11246292  0.32862676]
"""