#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import os
from .layer_function_generator import generate_layer_fn, generate_activation_fn, add_sample_code
from .. import core
from ..framework import convert_np_dtype_to_dtype_, Variable
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated

__deprecated_func_name__ = {
    'tanh_shrink': 'tanhshrink',
    'logsigmoid': 'log_sigmoid'
}

__activations_noattr__ = [
    'sigmoid',
    'silu',
    'logsigmoid',
    'tanh_shrink',
    'softplus',
    'softsign',
    'tanh',
]

__unary_func__ = [
    'exp',
    'atan',
    'sqrt',
    'rsqrt',
    'abs',
    'ceil',
    'floor',
    'cos',
    'tan',
    'acos',
    'sin',
    'sinh',
    'asin',
    'cosh',
    'round',
    'reciprocal',
    'square',
]

__all__ = []

for _OP in set(__all__):
    globals()[_OP] = generate_layer_fn(_OP)
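
# NOTE: `__all__` is empty at this point, so the loop above is currently a
# no-op; the exported ops below are generated by the later loops instead.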

# This is a hot fix for some unittests that use:
#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
# e.g.: test_program_code.py, test_dist_train.py
globals()['_scale'] = generate_layer_fn('scale')

globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')

__all__ += __activations_noattr__
__all__ += __unary_func__

for _OP in set(__activations_noattr__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_activation_fn(_OP)
    func = deprecated(
        since="2.0.0", update_to="paddle.nn.functional.%s" % (_new_OP))(func)
    globals()[_OP] = func

for _OP in set(__unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_activation_fn(_OP)
    func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(func)
    globals()[_OP] = func
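
# The two loops above are roughly equivalent to hand-writing, for each op, a
# definition like the following sketch (shown here for 'exp'; the generated
# function also carries an auto-generated docstring):
#
#   exp = deprecated(since="2.0.0", update_to="paddle.exp")(
#       generate_activation_fn('exp'))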

add_sample_code(globals()["sigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.sigmoid(x)
        print(out)
        # [0.40131234 0.450166   0.52497919 0.57444252]

""")

add_sample_code(globals()["silu"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
        out = F.silu(x)
        print(out)
        # [0.7310586, 1.7615942, 2.8577224, 3.9280552]

""")

add_sample_code(globals()["logsigmoid"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.log_sigmoid(x)
        print(out)
        # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]

""")

add_sample_code(globals()["exp"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.exp(x)
        print(out)
        # [0.67032005 0.81873075 1.10517092 1.34985881]

""")

add_sample_code(globals()["tanh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tanh(x)
        print(out)
        # [-0.37994896 -0.19737532  0.09966799  0.29131261]

""")

add_sample_code(globals()["atan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.atan(x)
        print(out)
        # [-0.38050638 -0.19739556  0.09966865  0.29145679]

""")

add_sample_code(globals()["tanh_shrink"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.tanhshrink(x)
        print(out)
        # [-0.020051, -0.00262468, 0.000332005, 0.00868739]

""")

add_sample_code(globals()["sqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.sqrt(x)
        print(out)
        # [0.31622777 0.4472136  0.54772256 0.63245553]

""")

add_sample_code(globals()["rsqrt"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.rsqrt(x)
        print(out)
        # [3.16227766 2.23606798 1.82574186 1.58113883]

""")

add_sample_code(globals()["abs"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.abs(x)
        print(out)
        # [0.4 0.2 0.1 0.3]

""")

add_sample_code(globals()["ceil"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.ceil(x)
        print(out)
        # [-0. -0.  1.  1.]

""")

add_sample_code(globals()["floor"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.floor(x)
        print(out)
        # [-1. -1.  0.  0.]

""")

add_sample_code(globals()["cos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cos(x)
        print(out)
        # [0.92106099 0.98006658 0.99500417 0.95533649]

""")

add_sample_code(globals()["tan"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.tan(x)
        print(out)
        # [-0.42279324, -0.20271005, 0.10033467, 0.30933627]

""")

add_sample_code(globals()["acos"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.acos(x)
        print(out)
        # [1.98231317 1.77215425 1.47062891 1.26610367]

""")

add_sample_code(globals()["sin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sin(x)
        print(out)
        # [-0.38941834 -0.19866933  0.09983342  0.29552021]

""")

add_sample_code(globals()["asin"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.asin(x)
        print(out)
        # [-0.41151685 -0.20135792  0.10016742  0.30469265]

""")

add_sample_code(globals()["cosh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.cosh(x)
        print(out)
        # [1.08107237 1.02006676 1.00500417 1.04533851]

""")

add_sample_code(globals()["sinh"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.sinh(x)
        print(out)
        # [-0.41075233 -0.201336    0.10016675  0.30452029]

""")

add_sample_code(globals()["round"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
        out = paddle.round(x)
        print(out)
        # [-1. -0.  1.  2.]

""")

add_sample_code(globals()["reciprocal"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.reciprocal(x)
        print(out)
        # [-2.5        -5.         10.          3.33333333]

""")

add_sample_code(globals()["square"], r"""
Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.square(x)
        print(out)
        # [0.16 0.04 0.01 0.09]

""")

add_sample_code(globals()["softplus"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softplus(x)
        print(out)
        # [0.513015, 0.598139, 0.744397, 0.854355]

""")

add_sample_code(globals()["softsign"], r"""
Examples:
    .. code-block:: python

        import paddle
        import paddle.nn.functional as F

        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = F.softsign(x)
        print(out)
        # [-0.285714, -0.166667, 0.0909091, 0.230769]

""")

__all__ += ['softshrink']

_softshrink_ = generate_layer_fn('softshrink')


def softshrink(x, alpha=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'softshrink')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            if name == 'alpha':
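                # the underlying op expects the attribute name 'lambda', a
                # reserved word in Python, so the public argument is 'alpha'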
                kwargs['lambda'] = val
            else:
                kwargs[name] = val
    return _softshrink_(**kwargs)


softshrink.__doc__ = r"""
	:alias_main: paddle.nn.functional.softshrink
	:alias: paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink
	:old_api: paddle.fluid.layers.softshrink

:strong:`Softshrink Activation Operator`

..  math::
    out = \\begin{cases}
            x - \\alpha, \\text{if } x > \\alpha \\\\
            x + \\alpha, \\text{if } x < -\\alpha \\\\
            0,  \\text{otherwise}
          \\end{cases}


Args:
    x (Variable): Input of Softshrink operator, an N-D Tensor, with data type float32, float64 or float16.
    alpha (float): The non-negative threshold used in the equation above.
    
Returns:
    Output of Softshrink operator with the same type of input.

Examples:
    .. code-block:: python
    
        import paddle.fluid as fluid
        data = fluid.data(name="input", shape=[None, 784])
        result = fluid.layers.softshrink(x=data, alpha=0.3)
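
        # a hedged numeric sketch (hypothetical input, alpha=0.3):
        #   softshrink([-1.0, -0.2, 0.5]) -> [-0.7, 0.0, 0.2]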
"""

__all__ += ['hard_shrink']

_hard_shrink_ = generate_layer_fn('hard_shrink')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.hardshrink")
def hard_shrink(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_shrink')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _hard_shrink_(**kwargs)


hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> import paddle.fluid as fluid
    >>> data = fluid.layers.data(name="input", shape=[784])
    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
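    >>> # a hedged numeric sketch: with threshold=0.3, values with
    >>> # |x| <= 0.3 are zeroed, e.g. [-1.0, -0.2, 0.5] -> [-1.0, 0.0, 0.5]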
"""

__all__ += ['cumsum']

_cum_sum_ = generate_layer_fn('cumsum')


@deprecated(
    since="2.0.0",
    update_to="paddle.cumsum",
    reason="New APIs for Paddle 2.0 are coming.")
def cumsum(x, axis=None, exclusive=None, reverse=None):
    check_type(x, 'x', (Variable), 'cumsum')
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _cum_sum_(**kwargs)


cumsum.__doc__ = """
	:alias_main: paddle.cumsum
	:alias: paddle.cumsum,paddle.tensor.cumsum,paddle.tensor.math.cumsum
	:old_api: paddle.fluid.layers.cumsum

The cumulative sum of the elements along a given axis. By default, the first element of the result is the same as the first element of the input. If exclusive is True, the first element of the result is 0.

Args:
    x (Variable): Input of cumsum operator, the Tensor/LoDTensor to be accumulated.
    axis (int, optional): The dimension to accumulate along. -1 means the last dimension. Default is -1.
    exclusive (bool, optional): Whether to perform exclusive cumsum. Default is False.
    reverse (bool, optional): If true, the cumsum is performed in the reversed direction. Default is False.

Returns:
    Variable(Tensor/LoDTensor): The output of the cumsum operator.

Examples:
    .. code-block:: python
        
        import paddle.fluid as fluid
        data = fluid.layers.data(name="input", shape=[32, 784])
        result = fluid.layers.cumsum(data, axis=0)
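
        # a hedged sketch of the optional flags, assuming a hypothetical
        # 1-D input x = [1., 2., 3.]:
        #   cumsum(x)                  # [1., 3., 6.]
        #   cumsum(x, exclusive=True)  # [0., 1., 3.]
        #   cumsum(x, reverse=True)    # [6., 5., 3.]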
"""

__all__ += ['thresholded_relu']

_thresholded_relu_ = generate_layer_fn('thresholded_relu')


def thresholded_relu(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'thresholded_relu')

    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val

    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = r"""
	:alias_main: paddle.nn.functional.thresholded_relu
	:alias: paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu
	:old_api: paddle.fluid.layers.thresholded_relu

:strong:`Thresholded ReLU Activation Operator`

Equation:
    ..  math::
        out = \\begin{cases}
            x, &if x > threshold \\\\
            0, &otherwise
            \\end{cases}

Args:
    x(Variable): The input of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64.
        
    threshold(float, optional): The threshold value. Note that if the arg `threshold` is not set, the threshold in the equation is 1.0.

Returns:

    Variable: The output of Thresholded ReLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.thresholded_relu(x, threshold=0.1)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.thresholded_relu(x, threshold=0.1)
            y_np = y.numpy()
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)
"""

__all__ += ['gelu']

_gelu_ = generate_layer_fn('gelu')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.gelu")
def gelu(x, approximate=False):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _gelu_(**kwargs)


gelu.__doc__ = r"""
:strong:`GeLU Activation Operator`
For more details, see [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415).

Equation:
    if approximate is True
    ..  math::
        out = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))

    else
    ..  math::
        out = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))

Args:

    x(Variable): The input of GeLU op, Tensor or LoDTensor, dtype: float32 or float64.

Returns:

    Variable: The output of GeLU op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.gelu(x)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #         [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.gelu(x)
            y_np = y.numpy()
        data
        # array([[ 0.87165993, -1.0541513 , -0.37214822],
        #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
        y_np
        # array([[ 0.70456535, -0.15380788, -0.13207214],
        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
"""

__all__ += ['erf']

_erf_ = generate_layer_fn('erf')


def erf(x, name=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _erf_(**kwargs)


erf.__doc__ = r"""
:strong:`Erf Operator`
For more details, see [Error function](https://en.wikipedia.org/wiki/Error_function).

Equation:
    ..  math::
        out = \\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x}e^{- \\eta^{2}}d\\eta

Args:

    x (Tensor): The input tensor; its data type should be float32 or float64.

Returns:

    Tensor: The output of Erf op, dtype: float32 or float64, the same as the input, shape: the same as the input.

Examples:
    
    .. code-block:: python
    
        import paddle
        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        out = paddle.erf(x)
        print(out)
        # [-0.42839236 -0.22270259  0.11246292  0.32862676]
"""