#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only

from .. import _C_ops
from ..fluid.data_feeder import check_variable_and_dtype
from ..framework import LayerHelper, in_dynamic_mode
from .layer_function_generator import (
    add_sample_code,
    generate_activation_fn,
    generate_inplace_fn,
    generate_layer_fn,
)

# Map from the legacy op name to the public API name it was renamed to.
__deprecated_func_name__ = {
    'tanh_shrink': 'tanhshrink',
    'logsigmoid': 'log_sigmoid',
}

# Activation ops that take no attributes; wrapped by generate_activation_fn.
__activations_noattr__ = [
    'silu',
    'logsigmoid',
    'tanh_shrink',
    'softplus',
    'softsign',
    'tanh',
]

# Plain unary ops wrapped the same way as the no-attr activations.
__unary_func__ = ['abs']

# In-place variants (trailing underscore); wrapped by generate_inplace_fn
# and restricted to dygraph via inplace_apis_in_dygraph_only.
__inplace_unary_func__ = [
    'exp_',
    'sqrt_',
    'rsqrt_',
    'ceil_',
    'floor_',
    'round_',
    'reciprocal_',
    'sigmoid_',
    'abs_',
    'sin_',
    'sinh_',
    'asin_',
    'asinh_',
    'cos_',
    'cosh_',
    'acos_',
    'acosh_',
    'tan_',
    'atan_',
    'atanh_',
    'expm1_',
    'erf_',
    'square_',
]

__all__ = []

# It is a hot fix in some unittest using:
#   paddle.scale(x=x, scale=10.0, out=out_var)
# e.g.: test_program_code.py, test_dist_train.py
globals()['_scale'] = generate_layer_fn('scale')

globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')

# Register the generated wrappers into this module's namespace so they are
# importable as regular functions.
for _OP in set(__activations_noattr__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    _func = generate_activation_fn(_OP)
    globals()[_OP] = _func

for _OP in set(__unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    _func = generate_activation_fn(_OP)
    globals()[_OP] = _func

for _OP in set(__inplace_unary_func__):
    _new_OP = _OP
    if _OP in __deprecated_func_name__:
        _new_OP = __deprecated_func_name__[_OP]
    func = generate_inplace_fn(_OP)
    # Attribute the generated function to this module for introspection/docs.
    func.__module__ = __name__
    _func = inplace_apis_in_dygraph_only(func)
    globals()[_OP] = _func

# Attach runnable doctest examples to the generated activation wrappers.
add_sample_code(
    globals()["silu"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle
        >>> import paddle.nn.functional as F

        >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
        >>> out = F.silu(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [0.73105860, 1.76159406, 2.85772228, 3.92805505])
""",
)

add_sample_code(
    globals()["logsigmoid"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle
        >>> import paddle.nn.functional as F

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = F.log_sigmoid(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [-0.91301525, -0.79813892, -0.64439666, -0.55435526])
""",
)

add_sample_code(
    globals()["tanh"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = paddle.tanh(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [-0.37994900, -0.19737528,  0.09966799,  0.29131261])
""",
)

add_sample_code(
    globals()["tanh_shrink"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle
        >>> import paddle.nn.functional as F

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = F.tanhshrink(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [-0.02005100, -0.00262472,  0.00033201,  0.00868741])
""",
)

add_sample_code(
    globals()["abs"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = paddle.abs(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [0.40000001, 0.20000000, 0.10000000, 0.30000001])
""",
)

add_sample_code(
    globals()["softplus"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle
        >>> import paddle.nn.functional as F

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = F.softplus(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [0.51301527, 0.59813893, 0.74439669, 0.85435522])
""",
)

add_sample_code(
    globals()["softsign"],
    r"""
Examples:
    .. code-block:: python

        >>> import paddle
        >>> import paddle.nn.functional as F

        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = F.softsign(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [-0.28571430, -0.16666666,  0.09090909,  0.23076925])
""",
)

def acos(x, name=None):
    """
    Acos Activation Operator.

    .. math::
        out = cos^{-1}(x)

    Args:
        x (Tensor): Input of Acos operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Acos operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.acos(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.98231316, 1.77215421, 1.47062886, 1.26610363])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.acos(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acos'
        )
        helper = LayerHelper('acos', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
        return out


def acosh(x, name=None):
    """
    Acosh Activation Operator.

    .. math::
        out = acosh(x)

    Args:
        x (Tensor): Input of Acosh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Acosh operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1., 3., 4., 5.])
            >>> out = paddle.acosh(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.        , 1.76274717, 2.06343699, 2.29243159])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.acosh(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acosh'
        )
        helper = LayerHelper('acosh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
        return out


def asin(x, name=None):
    """
    Arcsine Operator.

    .. math::
        out = sin^{-1}(x)

    Args:
        x (Tensor): Input of Asin operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Same shape and dtype as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.asin(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.41151685, -0.20135793,  0.10016742,  0.30469266])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.asin(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asin'
        )
        helper = LayerHelper('asin', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
        return out


def asinh(x, name=None):
    """
    Asinh Activation Operator.

    .. math::
        out = asinh(x)

    Args:
        x (Tensor): Input of Asinh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Asinh operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.asinh(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.39003533, -0.19869010,  0.09983408,  0.29567307])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.asinh(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asinh'
        )
        helper = LayerHelper('asinh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
        return out


def atan(x, name=None):
    """
    Arctangent Operator.

    .. math::
        out = tan^{-1}(x)

    Args:
        x (Tensor): Input of Atan operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Same shape and dtype as input x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.atan(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.38050640, -0.19739556,  0.09966865,  0.29145682])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.atan(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atan'
        )
        helper = LayerHelper('atan', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
        return out


def atanh(x, name=None):
    """
    Atanh Activation Operator.

    .. math::
        out = atanh(x)

    Args:
        x (Tensor): Input of Atan operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Atanh operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.atanh(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.42364895, -0.20273255,  0.10033534,  0.30951962])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.atanh(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atanh'
        )
        helper = LayerHelper('atanh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
        return out


def ceil(x, name=None):
    """

    Ceil Operator. Computes ceil of x element-wise.

    .. math::
        out = \\left \\lceil x \\right \\rceil

    Args:
        x (Tensor): Input of Ceil operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Ceil operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.ceil(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0., -0., 1. , 1. ])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.ceil(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'ceil'
        )
        helper = LayerHelper('ceil', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
        return out


def cos(x, name=None):
    """
    Cosine Operator. Computes cosine of x element-wise.

    Input range is `(-inf, inf)` and output range is `[-1,1]`.

    .. math::
        out = cos(x)

    Args:
        x (Tensor): Input of Cos operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Cos operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.cos(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.92106098, 0.98006660, 0.99500418, 0.95533651])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.cos(x)
    else:
        # Static graph: cos additionally accepts complex dtypes.
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'float32', 'float64', 'complex64', 'complex128'],
            'cos',
        )
        helper = LayerHelper('cos', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
        return out


def cosh(x, name=None):
    """
    Cosh Activation Operator.

    Input range `(-inf, inf)`, output range `(1, inf)`.

    .. math::
        out = \\frac{exp(x)+exp(-x)}{2}

    Args:
        x (Tensor): Input of Cosh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Cosh operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.cosh(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.08107233, 1.02006674, 1.00500417, 1.04533851])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.cosh(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'cosh'
        )
        helper = LayerHelper('cosh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
        return out


def exp(x, name=None):
    """

    Computes exp of x element-wise with a natural number `e` as the base.

    .. math::
        out = e^x

    Args:
        x (Tensor): Input of Exp operator, an N-D Tensor, with data type int32, int64, float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Exp operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.exp(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.67032003, 0.81873077, 1.10517097, 1.34985888])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.exp(x)
    else:
        # Static graph: exp accepts integer, floating and complex dtypes.
        check_variable_and_dtype(
            x,
            'x',
            [
                'int32',
                'int64',
                'uint16',
                'float16',
                'float32',
                'float64',
                'complex64',
                'complex128',
            ],
            'exp',
        )
        helper = LayerHelper('exp', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
        return out


def expm1(x, name=None):
    """

    Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base.

    .. math::
        out = e^x - 1

    Args:
        x (Tensor): Input of Expm1 operator, an N-D Tensor, with data type int32, int64, float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Expm1 operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.expm1(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.32967997, -0.18126924,  0.10517092,  0.34985882])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.expm1(x)
    else:
        # Static graph: expm1 accepts integer dtypes in addition to floats.
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'expm1',
        )
        helper = LayerHelper('expm1', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
        return out


def floor(x, name=None):
    """

    Floor Activation Operator. Computes floor of x element-wise.

    .. math::
        out = \\lfloor x \\rfloor

    Args:
        x (Tensor): Input of Floor operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Floor operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.floor(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-1., -1.,  0.,  0.])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.floor(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'floor'
        )
        helper = LayerHelper('floor', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
        return out


def reciprocal(x, name=None):
    """

    Reciprocal Activation Operator.

    .. math::
        out = \\frac{1}{x}

    Args:
        x (Tensor): Input of Reciprocal operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Reciprocal operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.reciprocal(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-2.50000000, -5.        ,  10.       ,  3.33333325])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.reciprocal(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'reciprocal'
        )
        helper = LayerHelper('reciprocal', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
        )
        return out


def round(x, name=None):
    """

    Round the values in the input to the nearest integer value.

    .. code-block:: text

        input:
          x.shape = [4]
          x.data = [1.2, -0.9, 3.4, 0.9]

        output:
          out.shape = [4]
          out.data = [1., -1., 3., 1.]

    Args:
        x (Tensor): Input of Round operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Round operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
            >>> out = paddle.round(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-1., -0.,  1.,  2.])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.round(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'round'
        )
        helper = LayerHelper('round', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
        return out


def rsqrt(x, name=None):
    """
    Rsqrt Activation Operator.

    Please make sure input is legal in case of numeric errors.

    .. math::
        out = \\frac{1}{\\sqrt{x}}

    Args:
        x (Tensor): Input of Rsqrt operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Rsqrt operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
            >>> out = paddle.rsqrt(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [3.16227770, 2.23606801, 1.82574177, 1.58113885])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.rsqrt(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'rsqrt'
        )
        helper = LayerHelper('rsqrt', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
        return out


def sigmoid(x, name=None):
    """
    Sigmoid Activation.

    .. math::
        out = \\frac{1}{1 + e^{-x}}

    Args:
        x (Tensor): Input of Sigmoid operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Sigmoid operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = F.sigmoid(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.40131235, 0.45016602, 0.52497917, 0.57444251])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.sigmoid(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sigmoid'
        )
        helper = LayerHelper('sigmoid', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
        return out


def sin(x, name=None):
    """
    Sine Activation Operator.

    .. math::
        out = sin(x)

    Args:
        x (Tensor): Input of Sin operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Sin operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.sin(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.38941833, -0.19866933,  0.09983342,  0.29552022])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.sin(x)
    else:
        # Static graph: sin additionally accepts complex dtypes.
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'uint16',
                'float32',
                'float64',
                'complex64',
                'complex128',
            ],
            'sin',
        )
        helper = LayerHelper('sin', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
        return out


def sinh(x, name=None):
    """
    Sinh Activation Operator.

    .. math::
        out = sinh(x)

    Args:
        x (Tensor): Input of Sinh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Sinh operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.sinh(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.41075233, -0.20133601,  0.10016675,  0.30452031])
    """
    if in_dynamic_mode():
        # Dygraph: dispatch straight to the C++ kernel.
        return _C_ops.sinh(x)
    else:
        # Static graph: validate dtype, then append the op to the program.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sinh'
        )
        helper = LayerHelper('sinh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
        return out


def sqrt(x, name=None):
    """
    Sqrt Activation Operator.

    .. math::
       out=\\sqrt{x}=x^{1/2}

    Args:
        x (Tensor): Input of Sqrt operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Sqrt operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

953
            >>> import paddle
954

955 956 957 958 959
            >>> x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
            >>> out = paddle.sqrt(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.31622776, 0.44721359, 0.54772258, 0.63245553])
960
    """
961
    if in_dynamic_mode():
962
        return _C_ops.sqrt(x)
963 964
    else:
        check_variable_and_dtype(
M
mhy-666 已提交
965 966 967 968
            x,
            'x',
            ['float16', 'uint16', 'float32', 'float64'],
            'sqrt',
969 970 971 972 973
        )
        helper = LayerHelper('sqrt', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
        return out
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992


def square(x, name=None):
    """
    Square each elements of the inputs.

    .. math::
       out = x^2

    Args:
        x (Tensor): Input of Square operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Square operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

993
            >>> import paddle
994

995 996 997 998 999
            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.square(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.16000001, 0.04000000, 0.01000000, 0.09000000])
1000
    """
1001
    if in_dynamic_mode():
1002
        return _C_ops.square(x)
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
    else:
        check_variable_and_dtype(
            x,
            'x',
            [
                'int32',
                'int64',
                'float16',
                'float32',
                'float64',
                'complex64',
                'complex128',
            ],
            'square',
        )
        helper = LayerHelper('square', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
        return out
1022 1023


1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
def tan(x, name=None):
    """
    Tangent Operator. Computes tangent of x element-wise.

    Input range is `(k*pi-pi/2, k*pi+pi/2)` and output range is `(-inf, inf)`.

    .. math::
       out = tan(x)

    Args:
        x (Tensor): Input of Tan operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Tan operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

1043
            >>> import paddle
1044

1045 1046 1047 1048 1049
            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.tan(x)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.42279324, -0.20271003,  0.10033467,  0.30933627])
1050
    """
1051
    if in_dynamic_mode():
1052
        return _C_ops.tan(x)
1053 1054
    else:
        check_variable_and_dtype(
1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065
            x,
            'x',
            [
                'float16',
                'uint16',
                'float32',
                'float64',
                'complex64',
                'complex128',
            ],
            'tan',
1066 1067 1068 1069 1070
        )
        helper = LayerHelper('tan', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
        return out
1071 1072


1073 1074 1075 1076
_erf_ = generate_layer_fn('erf')


def erf(x, name=None):
1077
    if in_dynamic_mode():
1078
        return _C_ops.erf(x)
1079

1080
    locals_var = locals().copy()
1081
    kwargs = {}
1082 1083 1084 1085 1086 1087 1088 1089
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _erf_(**kwargs)


erf.__doc__ = r"""
:strong:`Erf Operator`
1090
For more details, see `Error function <https://en.wikipedia.org/wiki/Error_function>`_.
1091 1092 1093

Equation:
    ..  math::
1094
        out = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta
1095 1096 1097 1098

Args:

    x (Tensor): The input tensor, it's data type should be float32, float64.
1099
    name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1100 1101 1102

Returns:

1103
    Tensor: The output of Erf, dtype: float32 or float64, the same as the input, shape: the same as the input.
1104 1105

Examples:
1106

1107
    .. code-block:: python
1108

1109
        >>> import paddle
1110

1111 1112 1113 1114 1115
        >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
        >>> out = paddle.erf(x)
        >>> print(out)
        Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
        [-0.42839241, -0.22270259,  0.11246292,  0.32862678])
1116
"""