# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
math functions
"""
import numpy as np

import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.common_ops_import import VarDesc, dygraph_only, dygraph_utils
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only

from ..base.data_feeder import (
    check_dtype,
    check_type,
    check_variable_and_dtype,
    convert_dtype,
)
from ..common_ops_import import Variable
from ..framework import (
    LayerHelper,
    convert_np_dtype_to_dtype_,
    core,
    in_dynamic_mode,
)
from .creation import _complex_to_real_dtype
from .layer_function_generator import generate_layer_fn, templatedoc
from .manipulation import cast, cast_
from .ops import abs  # noqa: F401
from .ops import abs_  # noqa: F401
from .ops import acos  # noqa: F401
from .ops import acos_  # noqa: F401
from .ops import acosh  # noqa: F401
from .ops import acosh_  # noqa: F401
from .ops import asin  # noqa: F401
from .ops import asin_  # noqa: F401
from .ops import asinh  # noqa: F401
from .ops import asinh_  # noqa: F401
from .ops import atan  # noqa: F401
from .ops import atan_  # noqa: F401
from .ops import atanh  # noqa: F401
from .ops import atanh_  # noqa: F401
from .ops import ceil  # noqa: F401
from .ops import ceil_  # noqa: F401
from .ops import cos  # noqa: F401
from .ops import cos_  # noqa: F401
from .ops import cosh  # noqa: F401
from .ops import cosh_  # noqa: F401
from .ops import erf  # noqa: F401
from .ops import erf_  # noqa: F401
from .ops import exp  # noqa: F401
from .ops import exp_  # noqa: F401
from .ops import expm1  # noqa: F401
from .ops import expm1_  # noqa: F401
from .ops import floor  # noqa: F401
from .ops import floor_  # noqa: F401
from .ops import reciprocal  # noqa: F401
from .ops import reciprocal_  # noqa: F401
from .ops import round  # noqa: F401
from .ops import round_  # noqa: F401
from .ops import rsqrt  # noqa: F401
from .ops import rsqrt_  # noqa: F401
from .ops import sigmoid  # noqa: F401
from .ops import sigmoid_  # noqa: F401
from .ops import sin  # noqa: F401
from .ops import sin_  # noqa: F401
from .ops import sinh  # noqa: F401
from .ops import sinh_  # noqa: F401
from .ops import sqrt  # noqa: F401
from .ops import sqrt_  # noqa: F401
from .ops import square  # noqa: F401
from .ops import square_  # noqa: F401
from .ops import tan  # noqa: F401
from .ops import tan_  # noqa: F401

__all__ = []

_supported_int_dtype_ = [
    VarDesc.VarType.UINT8,
    VarDesc.VarType.INT8,
    VarDesc.VarType.INT16,
    VarDesc.VarType.INT32,
    VarDesc.VarType.INT64,
]

_supported_float_dtype_ = [
    VarDesc.VarType.FP32,
    VarDesc.VarType.FP64,
]


def _get_reduce_axis(axis, x):
    """
    Internal function for max, min, amax and amin.
    It computes the attribute reduce_all value based on axis.
    """
    if axis is not None and not isinstance(axis, list):
        if isinstance(axis, (tuple, range)):
            axis = list(axis)
        elif isinstance(axis, int):
            axis = [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(
                    type(axis)
                )
            )
    if axis is None:
        axis = []
    if axis == [] or len(axis) == len(x.shape):
        reduce_all = True
    else:
        reduce_all = False
    return reduce_all, axis


def _get_reduce_axis_with_tensor(axis, x):
    if isinstance(axis, Variable):
        if axis.shape[0] == len(x.shape):
            reduce_all = True
        else:
            reduce_all = False
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        if paddle.utils._contain_var(axis):
            axis = paddle.utils._convert_to_tensor_list(axis)
    return reduce_all, axis
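
# Illustrative sketch (not part of the public API): how the helpers above
# normalize ``axis`` for a 3-D input ``x``:
#
#     _get_reduce_axis((0, 2), x)     # -> (False, [0, 2])
#     _get_reduce_axis(None, x)       # -> (True, [])
#     _get_reduce_axis([0, 1, 2], x)  # -> (True, [0, 1, 2])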


def log(x, name=None):
    r"""
    Calculates the natural log of the given input Tensor, element-wise.

    .. math::

        Out = \ln(x)

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: int32, int64, float16, bfloat16, float32, float64.
        name (str|None, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The natural log of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = [[2, 3, 4], [7, 8, 9]]
            >>> x = paddle.to_tensor(x, dtype='float32')
            >>> print(paddle.log(x))
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.69314718, 1.09861231, 1.38629436],
             [1.94591010, 2.07944155, 2.19722462]])
    """
    if in_dynamic_mode():
        return _C_ops.log(x)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'uint16', 'float16', 'float32', 'float64'],
            "log",
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
        return out


@inplace_apis_in_dygraph_only
def log_(x, name=None):
    r"""
    Inplace version of ``log`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_log`.
    """

    if in_dynamic_mode():
        return _C_ops.log_(x)


def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    Scale operator.

    Apply scale and bias to the input Tensor as follows:

    ``bias_after_scale`` is True:

    .. math::
        Out = scale * X + bias

    ``bias_after_scale`` is False:

    .. math::
        Out = scale * (X + bias)

    Args:
        x (Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
        scale (float|Tensor): The scale factor of the input, it should be a float number or a 0-D Tensor with shape [] and data type as float32.
        bias (float): The bias to be put on the input.
        bias_after_scale (bool): Apply bias addition after or before scaling. It is useful for numerical stability in some circumstances.
        act (str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Output Tensor of scale operator, with shape and data type same as input.

    Examples:
        .. code-block:: python

            >>> # scale as a float32 number
            >>> import paddle

            >>> data = paddle.arange(6).astype("float32").reshape([2, 3])
            >>> print(data)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0., 1., 2.],
             [3., 4., 5.]])
            >>> res = paddle.scale(data, scale=2.0, bias=1.0)
            >>> print(res)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1. , 3. , 5. ],
             [7. , 9. , 11.]])

        .. code-block:: python

            >>> # scale with parameter scale as a Tensor
            >>> import paddle

            >>> data = paddle.arange(6).astype("float32").reshape([2, 3])
            >>> print(data)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0., 1., 2.],
             [3., 4., 5.]])
            >>> factor = paddle.to_tensor([2], dtype='float32')
            >>> res = paddle.scale(data, scale=factor, bias=1.0)
            >>> print(res)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1. , 3. , 5. ],
             [7. , 9. , 11.]])

    """

    if in_dynamic_mode():
        if act is None:
            return _C_ops.scale(x, scale, float(bias), bias_after_scale)
        out = _C_ops.scale(x, scale, float(bias), bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out, act)
    else:
        check_variable_and_dtype(
            x,
            "x",
            [
                'float16',
                'bfloat16',
                'uint16',
                'float32',
                'float64',
                'int8',
                'int16',
                'int32',
                'int64',
                'uint8',
                'complex64',
                'complex128',
            ],
            "scale",
        )
        inputs = {'X': [x]}
        attrs = {
            'bias': float(bias),
            'bias_after_scale': bias_after_scale,
        }
        if isinstance(scale, Variable):
            inputs['ScaleTensor'] = [scale]
        else:
            attrs['scale'] = float(scale)
        helper = LayerHelper('scale', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return helper.append_activation(out)
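
# Worked example (added for illustration): with scale=2.0 and bias=1.0 applied
# to x = 3.0, bias_after_scale=True computes 2.0 * 3.0 + 1.0 = 7.0, while
# bias_after_scale=False computes 2.0 * (3.0 + 1.0) = 8.0.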


def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
    r"""

    stanh activation.

    .. math::

        out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale_a (float, optional): The scale factor a of the input. Default is 0.67.
        scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            >>> out = paddle.stanh(x, scale_a=0.67, scale_b=1.72)
            >>> print(out)
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.00616539, 1.49927628, 1.65933096, 1.70390463])

    """

    if in_dynamic_mode():
        return _C_ops.stanh(x, scale_a, scale_b)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'stanh'
        )

        helper = LayerHelper('stanh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='stanh',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'scale_a': scale_a, 'scale_b': scale_b},
        )
        return out


def multiplex(inputs, index, name=None):
    """

    Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.

    If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor, :math:`i` between :math:`[0,m)` .

    And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .

    For Example:

            .. code-block:: text

                Given:

                inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
                          [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
                          [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
                          [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]

                index = [[3],[0],[1],[2]]

                out = [[3,0,3,4],    # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
                       [0,1,3,4],    # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
                       [1,2,4,2],    # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
                       [2,3,3,4]]    # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]


    Args:
        inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
        index (Tensor): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> img1 = paddle.to_tensor([[1, 2], [3, 4]], dtype=paddle.float32)
            >>> img2 = paddle.to_tensor([[5, 6], [7, 8]], dtype=paddle.float32)
            >>> inputs = [img1, img2]
            >>> index = paddle.to_tensor([[1], [0]], dtype=paddle.int32)
            >>> res = paddle.multiplex(inputs, index)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[5., 6.],
             [3., 4.]])

    """
    if in_dynamic_mode():
        return _C_ops.multiplex(inputs, index)
    else:
        helper = LayerHelper('multiplex', **locals())

        check_type(inputs, 'inputs', (list), 'multiplex')
        if len(inputs) < 2:
            raise ValueError(
                "inputs should be a list object with at least 2 elements."
            )
        for id, x in enumerate(inputs):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['float32', 'float64', 'int32', 'int64'],
                'multiplex',
            )
        check_variable_and_dtype(
            index, "index", ['int32', 'int64'], 'multiplex'
        )

        out = helper.create_variable_for_type_inference(inputs[0].dtype)
        helper.append_op(
            type='multiplex',
            inputs={'X': inputs, 'Ids': index},
            outputs={'Out': [out]},
        )
        return out


@inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    Inplace version of ``scale`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_scale`.
    """
    if in_dynamic_mode():
        return _C_ops.scale_(x, scale, float(bias), bias_after_scale)


def pow(x, y, name=None):
    """
    Compute the power of Tensor elements. The equation is:

    .. math::
        out = x^{y}

    Note:
        ``paddle.pow`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor


    Args:
        x (Tensor): An N-D Tensor, the data type is float16, float32, float64, int32 or int64.
        y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension and data type are the same as `x`.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')

            >>> # example 1: y is a float or int
            >>> res = paddle.pow(x, 2)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1., 4., 9.])
            >>> res = paddle.pow(x, 2.5)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.         , 5.65685415 , 15.58845711])

            >>> # example 2: y is a Tensor
            >>> y = paddle.to_tensor([2], dtype='float32')
            >>> res = paddle.pow(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1., 4., 9.])

    """
    # in dynamic graph mode
    if in_dynamic_mode():
        if isinstance(y, (int, float)):
            return _C_ops.pow(x, y)
        elif isinstance(y, (paddle.Tensor, Variable)):
            return _C_ops.elementwise_pow(x, y)
        else:
            raise TypeError(
                'y must be scalar or tensor type, but received: %s ' % (type(y))
            )
    else:
        # in static graph mode
        if isinstance(y, (int, float)):
            helper = LayerHelper('pow', **locals())
            inputs = {'X': x}
            attrs = {'factor': y}
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
            )
            return out
        elif isinstance(y, (paddle.Tensor, Variable)):
            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
            helper = LayerHelper('elementwise_pow', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
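            # Note (added for clarity): this pre-created ``out`` is not dead
            # code -- it is captured by ``**locals()`` below and reaches
            # _elementwise_op as ``helper.kwargs['out']``, so the result is
            # written into this variable.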
            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
        else:
            raise TypeError(
                'y must be scalar or tensor type, but received: %s ' % (type(y))
            )


@inplace_apis_in_dygraph_only
def pow_(x, y, name=None):
    """
    Inplace version of ``pow`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_pow`.
    """
    if isinstance(y, (int, float)):
        return _C_ops.pow_(x, y)
    else:
        raise TypeError('y must be scalar type, but received: %s ' % (type(y)))
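
# Illustrative note: ``pow_`` only accepts a scalar exponent, e.g.
# ``x.pow_(2)`` squares ``x`` in place; a Tensor ``y`` raises the TypeError
# above, since the inplace path has no tensor-exponent form.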


OP_NAMEMAPPING = {
    'elementwise_max': 'maximum',
    'elementwise_min': 'minimum',
    'elementwise_pow': 'elementwise_pow',
    'elementwise_floordiv': 'floor_divide',
    'elementwise_add': 'add',
    'elementwise_sub': 'subtract',
    'elementwise_mul': 'multiply',
    'elementwise_div': 'divide',
    'elementwise_mod': 'remainder',
}


def _elementwise_op(helper):
    op_type = helper.layer_type
    original_op_type = helper.kwargs.get('original_op_type', op_type)
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)

    out = helper.kwargs.get('out', None)

    assert x is not None, f'x cannot be None in {original_op_type}'
    assert y is not None, f'y cannot be None in {original_op_type}'
    bf16_and_complex_supported_ops = [
        "elementwise_add",
        "elementwise_sub",
        "elementwise_mul",
        "elementwise_div",
        "elementwise_max",
    ]
    if original_op_type in bf16_and_complex_supported_ops:
        data_type = [
            'uint16',
            'float16',
            'float32',
            'float64',
            'int32',
            'int64',
            'bool',
            'complex64',
            'complex128',
        ]
    else:
        data_type = [
            'float16',
            'uint16',
            'float32',
            'float64',
            'int32',
            'int64',
            'bool',
        ]
    check_variable_and_dtype(
        x,
        'x',
        data_type,
        original_op_type,
    )
    check_variable_and_dtype(
        y,
        'y',
        data_type,
        original_op_type,
    )

    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)

    if out is None:
        if name is None:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
        else:
            out = helper.create_variable(
                name=name, dtype=x.dtype, persistable=False
            )

    helper.append_op(
        type=op_type,
        inputs={'X': x, 'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis, 'use_mkldnn': use_mkldnn},
    )
    return helper.append_activation(out)
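
# Note on the pattern used below (added for clarity): each public wrapper
# builds ``LayerHelper(op_type, **locals())`` so the caller's ``x``, ``y``,
# ``name`` (and, in the private *_with_axis helpers, ``axis``) arrive here
# through ``helper.kwargs``.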


def add(x, y, name=None):
    """
    Elementwise Add Operator.
    Add two tensors element-wise.
    The equation is:

    ..  math::

        Out=X+Y

    $X$: a tensor of any dimension.
    $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

    There are two cases for this operator:

    1. The shape of $Y$ is the same as $X$.
    2. The shape of $Y$ is a continuous subsequence of $X$.

    For case 2:

    1. Broadcast $Y$ to match the shape of $X$, where axis is the start dimension index for broadcasting $Y$ onto $X$.
    2. If $axis$ is -1 (default), $axis$=rank($X$)-rank($Y$).
    3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of subsequence, such as shape($Y$) = (2, 1) => (2).

        For example:

        .. code-block:: text

            shape(X) = (2, 3, 4, 5), shape(Y) = (,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
            shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
            shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
            shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

    Args:
        x (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64.
        y (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64.
        name (string, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of x.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([2, 3, 4], 'float64')
            >>> y = paddle.to_tensor([1, 5, 2], 'float64')
            >>> z = paddle.add(x, y)
            >>> print(z)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [3., 8., 6.])
    """

    if in_dynamic_mode():
        return _C_ops.add(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_add', **locals()))


@inplace_apis_in_dygraph_only
def add_(x, y, name=None):
    """
    Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_add`.
    """

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )

    return _C_ops.add_(x, y)
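
# Illustrative note on the inplace guard above: x.shape == [2, 3] with
# y.shape == [3] broadcasts to [2, 3] (same as x), so it is accepted; the
# reverse is rejected because a [3] tensor cannot hold a [2, 3] result.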


def logaddexp(x, y, name=None):
    """
    Elementwise LogAddExp Operator.
    Computes the log of the sum of exponentiations of the inputs.
    The equation is:

    ..  math::

        Out=log(X.exp()+Y.exp())

    $X$: a tensor of any dimension.
    $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

    There are two cases for this operator:

    1. The shape of $Y$ is the same as $X$.
    2. The shape of $Y$ is a continuous subsequence of $X$.

    For case 2:

    1. Broadcast $Y$ to match the shape of $X$, where axis is the start dimension index for broadcasting $Y$ onto $X$.
    2. If $axis$ is -1 (default), $axis$=rank($X$)-rank($Y$).
    3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of subsequence, such as shape($Y$) = (2, 1) => (2).

        For example:

        .. code-block:: text

            shape(X) = (2, 3, 4, 5), shape(Y) = (,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
            shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
            shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
            shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

    Args:
        x (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64, float16.
        y (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64, float16.
        name (string, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of x.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-1, -2, -3], 'float64')
            >>> y = paddle.to_tensor([-1], 'float64')
            >>> z = paddle.logaddexp(x, y)
            >>> print(z)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [-0.30685282, -0.68673831, -0.87307199])
    """

    return paddle.log1p(paddle.exp(-paddle.abs(x - y))) + paddle.maximum(x, y)
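
# Derivation of the stable form above (added for clarity): with
# m = max(x, y), log(e^x + e^y) = m + log(1 + e^-|x - y|), i.e.
# maximum(x, y) + log1p(exp(-abs(x - y))), which avoids overflow in exp
# for large |x| or |y|.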


def subtract(x, y, name=None):
    """
    Subtract two tensors element-wise. The equation is:

    .. math::
        out = x - y

    Note:
        ``paddle.subtract`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [7, 8]])
            >>> y = paddle.to_tensor([[5, 6], [3, 4]])
            >>> res = paddle.subtract(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[-4, -4],
             [ 4,  4]])

            >>> x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            >>> y = paddle.to_tensor([1, 0, 4])
            >>> res = paddle.subtract(x, y)
            >>> print(res)
            Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[ 0,  2, -1],
              [ 0,  2, -1]]])

            >>> x = paddle.to_tensor([2, float('nan'), 5], dtype='float32')
            >>> y = paddle.to_tensor([1, 4, float('nan')], dtype='float32')
            >>> res = paddle.subtract(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1. , nan, nan])

            >>> x = paddle.to_tensor([5, float('inf'), -float('inf')], dtype='float64')
            >>> y = paddle.to_tensor([1, 4, 5], dtype='float64')
            >>> res = paddle.subtract(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [ 4.  ,  inf., -inf.])
    """
    if in_dynamic_mode():
        return _C_ops.subtract(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


@inplace_apis_in_dygraph_only
def subtract_(x, y, name=None):
    """
    Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_subtract`.
    """

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )

    return _C_ops.subtract_(x, y)


def divide(x, y, name=None):
    """
    Divide two tensors element-wise. The equation is:

    .. math::
        out = x / y

    Note:
        ``paddle.divide`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([2, 3, 4], dtype='float64')
            >>> y = paddle.to_tensor([1, 5, 2], dtype='float64')
            >>> z = paddle.divide(x, y)
            >>> print(z)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [2.        , 0.60000000, 2.        ])

    """
    if in_dynamic_mode():
        return _C_ops.divide(x, y)
    else:
        if paddle.ir.core._use_new_ir_api():
            return paddle._ir_ops.divide(x, y)
        return _elementwise_op(LayerHelper('elementwise_div', **locals()))


@inplace_apis_in_dygraph_only
def divide_(x, y, name=None):
    r"""
    Inplace version of ``divide`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_divide`.
    """
    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    return _C_ops.divide_(x, y)


def floor_divide(x, y, name=None):
    """
    Floor divide two tensors element-wise and rounds the quotients toward zero. The equation is:

    .. math::

        out = trunc(x / y)

    - :math:`x`: Multidimensional Tensor.
    - :math:`y`: Multidimensional Tensor.

    Note:
        ``paddle.floor_divide`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

        Also note that the name ``floor_divide`` can be misleading, as the quotients are actually rounded toward zero, not toward negative infinity.

    Args:
        x (Tensor): the input tensor, its data type should be uint8, int8, int32, int64, float32, float64, float16, bfloat16.
        y (Tensor): the input tensor, its data type should be uint8, int8, int32, int64, float32, float64, float16, bfloat16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([2, 3, 8, 7])
            >>> y = paddle.to_tensor([1, 5, 3, 3])
            >>> z = paddle.floor_divide(x, y)
            >>> print(z)
            Tensor(shape=[4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [2, 0, 2, 2])

    """
    if in_dynamic_mode():
        return _C_ops.floor_divide(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))


@inplace_apis_in_dygraph_only
def floor_divide_(x, y, name=None):
    r"""
    Inplace version of ``floor_divide`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_floor_divide`.
    """
    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    return _C_ops.floor_divide_(x, y)


def remainder(x, y, name=None):
    r"""
    Mod two tensors element-wise. The equation is:

    .. math::

        out = x \% y

    Note:
        ``paddle.remainder`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([2, 3, 8, 7])
            >>> y = paddle.to_tensor([1, 5, 3, 3])
            >>> z = paddle.remainder(x, y)
            >>> print(z)
            Tensor(shape=[4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 3, 2, 1])

    """
    if in_dynamic_mode():
        return _C_ops.remainder(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_mod', **locals()))


@inplace_apis_in_dygraph_only
def remainder_(x, y, name=None):
    r"""
    Inplace version of ``remainder`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_remainder`.
    """
    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    return _C_ops.remainder_(x, y)


mod = remainder  # noqa: F841
floor_mod = remainder  # noqa: F841
mod_ = remainder_  # noqa: F841
mod_.__doc__ = r"""
    Inplace version of ``mod`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_mod`.
    """
floor_mod_ = remainder_  # noqa: F841
floor_mod_.__doc__ = r"""
    Inplace version of ``floor_mod`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_floor_mod`.
    """


def multiply(x, y, name=None):
    """
    Multiply two tensors element-wise. The equation is:

    .. math::
        out = x * y

    Note:
        ``paddle.multiply`` supports broadcasting. If you would like to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
        y (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [3, 4]])
            >>> y = paddle.to_tensor([[5, 6], [7, 8]])
            >>> res = paddle.multiply(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[5 , 12],
             [21, 32]])
            >>> x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            >>> y = paddle.to_tensor([2])
            >>> res = paddle.multiply(x, y)
            >>> print(res)
            Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[2, 4, 6],
              [2, 4, 6]]])

    """
    if in_dynamic_mode():
        return _C_ops.multiply(x, y)
    else:
        if x.dtype != y.dtype:
            raise TypeError(
                f'Input tensors must be same type, but received type of x: {x.dtype}, type of y: {y.dtype} '
            )

        return _elementwise_op(LayerHelper('elementwise_mul', **locals()))


@inplace_apis_in_dygraph_only
def multiply_(x, y, name=None):
    """
    Inplace version of ``multiply`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_multiply`.
    """

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )

    return _C_ops.multiply_(x, y)


@dygraph_only
def _elementwise_op_with_axis_in_dygraph(
    x, y, axis=-1, name=None, op_type="Undefined"
):
    assert (
        in_dynamic_mode()
    ), "You can only call `_elementwise_op_with_axis_in_dygraph` function within in_dynamic_mode"
    assert op_type in ["add", "subtract", "multiply", "divide"], (
        "op_name input error! _elementwise_op_with_axis is an inner function to replace elementwise_add/sub/mul/div. Input op_name=%s, Expect op_name=[add|subtract|multiply|divide]\n"
        % op_type
    )
    op = getattr(_C_ops, op_type)
    x_shape = list(x.shape)
    y_shape = list(y.shape)
    if axis == -1 or len(x_shape) == len(y_shape):
        return op(x, y)
    if len(x_shape) > len(y_shape):
        padding = len(x_shape) - len(y_shape) - axis
        y = paddle.reshape(y, [1] * axis + y_shape + [1] * padding)
    else:
        padding = len(y_shape) - len(x_shape) - axis
        x = paddle.reshape(x, [1] * axis + x_shape + [1] * padding)
    return op(x, y)
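
# Worked example (added for clarity): with x.shape == [2, 3, 4, 5],
# y.shape == [3, 4] and axis == 1, padding == 4 - 2 - 1 == 1, so y is
# reshaped to [1, 3, 4, 1] before the broadcasted elementwise op runs.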


def _add_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dynamic_mode():
        return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "add")
    else:
        op_type = 'elementwise_add'
        return _elementwise_op(LayerHelper(op_type, **locals()))


def _subtract_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dynamic_mode():
        return _elementwise_op_with_axis_in_dygraph(
            x, y, axis, name, "subtract"
        )
    else:
        op_type = 'elementwise_sub'
        return _elementwise_op(LayerHelper(op_type, **locals()))


def _multiply_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dynamic_mode():
        return _elementwise_op_with_axis_in_dygraph(
            x, y, axis, name, "multiply"
        )
    else:
        op_type = 'elementwise_mul'
        return _elementwise_op(LayerHelper(op_type, **locals()))


def _divide_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dynamic_mode():
        return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "divide")
    else:
        op_type = 'elementwise_div'
        return _elementwise_op(LayerHelper(op_type, **locals()))


def maximum(x, y, name=None):
    """
    Compare two tensors and return a new tensor containing the element-wise maxima. The equation is:

    .. math::
        out = max(x, y)

    Note:
        ``paddle.maximum`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [7, 8]])
            >>> y = paddle.to_tensor([[3, 4], [5, 6]])
            >>> res = paddle.maximum(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[3, 4],
             [7, 8]])

            >>> x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            >>> y = paddle.to_tensor([3, 0, 4])
            >>> res = paddle.maximum(x, y)
            >>> print(res)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[3, 2, 4],
             [3, 2, 4]])

            >>> x = paddle.to_tensor([2, 3, 5], dtype='float32')
            >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
            >>> res = paddle.maximum(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [2. , nan, nan])

            >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
            >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
            >>> res = paddle.maximum(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [5.  , 3.  , inf.])
    """
    if in_dynamic_mode():
        return _C_ops.maximum(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_max', **locals()))
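
# Note (added for clarity): unlike ``paddle.fmax`` below, ``maximum``
# propagates NaN -- if either input element is NaN, the corresponding
# output element is NaN, as the float32 example in the docstring shows.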


def minimum(x, y, name=None):
    """
    Compare two tensors and return a new tensor containing the element-wise minima. The equation is:

    .. math::
        out = min(x, y)

    Note:
        ``paddle.minimum`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [7, 8]])
            >>> y = paddle.to_tensor([[3, 4], [5, 6]])
            >>> res = paddle.minimum(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 2],
             [5, 6]])

            >>> x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            >>> y = paddle.to_tensor([3, 0, 4])
            >>> res = paddle.minimum(x, y)
            >>> print(res)
            Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[1, 0, 3],
              [1, 0, 3]]])

            >>> x = paddle.to_tensor([2, 3, 5], dtype='float32')
            >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
            >>> res = paddle.minimum(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1. , nan, nan])

            >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
            >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
            >>> res = paddle.minimum(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [ 1.  , -inf.,  5.  ])
    """
    if in_dynamic_mode():
        return _C_ops.minimum(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_min', **locals()))


def fmax(x, y, name=None):
    """
    Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the element-wise maxima.
    If one of the elements is a NaN value, the other value is returned directly; if both are NaN values, the first NaN value is returned.
    The equation is:

    .. math::
        out = fmax(x, y)

    Note:
        ``paddle.fmax`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [7, 8]])
            >>> y = paddle.to_tensor([[3, 4], [5, 6]])
            >>> res = paddle.fmax(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[3, 4],
             [7, 8]])

            >>> x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            >>> y = paddle.to_tensor([3, 0, 4])
            >>> res = paddle.fmax(x, y)
            >>> print(res)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[3, 2, 4],
             [3, 2, 4]])

            >>> x = paddle.to_tensor([2, 3, 5], dtype='float32')
            >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
            >>> res = paddle.fmax(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [2., 3., 5.])

            >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
            >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
            >>> res = paddle.fmax(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [5.  , 3.  , inf.])
    """
    if in_dynamic_mode():
        return _C_ops.fmax(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_fmax', **locals()))


def fmin(x, y, name=None):
    """
    Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the element-wise minima.
    If one of the elements is a NaN value, the other value is returned directly; if both are NaN values, the first NaN value is returned.
    The equation is:

    .. math::
        out = fmin(x, y)

    Note:
        ``paddle.fmin`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2], [7, 8]])
            >>> y = paddle.to_tensor([[3, 4], [5, 6]])
            >>> res = paddle.fmin(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 2],
             [5, 6]])

            >>> x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            >>> y = paddle.to_tensor([3, 0, 4])
            >>> res = paddle.fmin(x, y)
            >>> print(res)
            Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[1, 0, 3],
              [1, 0, 3]]])

            >>> x = paddle.to_tensor([2, 3, 5], dtype='float32')
            >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
            >>> res = paddle.fmin(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1., 3., 5.])

            >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
            >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
            >>> res = paddle.fmin(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [ 1.  , -inf.,  5.  ])
    """
    if in_dynamic_mode():
        return _C_ops.fmin(x, y)
    else:
        return _elementwise_op(LayerHelper('elementwise_fmin', **locals()))


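# Illustrative sketch, not part of the public API: the NaN rule described in
# the fmax/fmin docstrings above, expressed with public ops. Where exactly one
# operand is NaN the other operand is taken; otherwise the smaller one wins
# (flip the comparison for fmax).
def _fmin_reference(x, y):
    smaller = paddle.where(x < y, x, y)  # plain elementwise minimum
    smaller = paddle.where(paddle.isnan(y), x, smaller)  # ignore NaN in y
    return paddle.where(paddle.isnan(x), y, smaller)  # ignore NaN in x

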
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
    """
    Computes the sum of tensor elements over the given dimension.

    Args:
        x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`x` and return a
            Tensor with a single element, otherwise must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of summation operation on the specified axis of input Tensor `x`,
        if `x.dtype` is `'bool'` or `'int32'`, its data type is `'int64'`,
        otherwise its data type is the same as `x`.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # x is a Tensor with following elements:
            >>> #    [[0.2, 0.3, 0.5, 0.9]
            >>> #     [0.1, 0.2, 0.6, 0.7]]
            >>> # Each example is followed by the corresponding output tensor.
            >>> x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, 0.6, 0.7]])
            >>> out1 = paddle.sum(x)
            >>> out1
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            3.50000000)
            >>> out2 = paddle.sum(x, axis=0)
            >>> out2
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.30000001, 0.50000000, 1.10000002, 1.59999990])
            >>> out3 = paddle.sum(x, axis=-1)
            >>> out3
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.89999998, 1.60000002])
            >>> out4 = paddle.sum(x, axis=1, keepdim=True)
            >>> out4
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1.89999998],
             [1.60000002]])

            >>> # y is a Tensor with shape [2, 2, 2] and elements as below:
            >>> #      [[[1, 2], [3, 4]],
            >>> #      [[5, 6], [7, 8]]]
            >>> # Each example is followed by the corresponding output tensor.
            >>> y = paddle.to_tensor([[[1, 2], [3, 4]],
            ...                       [[5, 6], [7, 8]]])
            >>> out5 = paddle.sum(y, axis=[1, 2])
            >>> out5
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [10, 26])
            >>> out6 = paddle.sum(y, axis=[0, 1])
            >>> out6
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [16, 20])

            >>> # x is a Tensor with following elements:
            >>> #    [[True, True, True, True]
            >>> #     [False, False, False, False]]
            >>> # Each example is followed by the corresponding output tensor.
            >>> x = paddle.to_tensor([[True, True, True, True],
            ...                       [False, False, False, False]])
            >>> out7 = paddle.sum(x)
            >>> out7
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            4)
            >>> out8 = paddle.sum(x, axis=0)
            >>> out8
            Tensor(shape=[4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [1, 1, 1, 1])
            >>> out9 = paddle.sum(x, axis=1)
            >>> out9
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [4, 0])
    """

    dtype_flag = False
    if dtype is not None:
        if paddle.ir.core._use_new_ir_api():
            dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
        else:
            dtype_flag = True
            dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dynamic_mode():
        return _C_ops.sum(x, axis, dtype, keepdim)
    else:
        if paddle.ir.core._use_new_ir_api():
            return paddle._ir_ops.sum(x, axis, dtype, keepdim)
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}

        if dtype_flag:
            attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype})

        check_variable_and_dtype(
            x,
            'x',
            [
                'bool',
1535
                'uint16',
1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'sum',
        )

        check_type(
            axis, 'axis', (int, list, tuple, type(None), Variable), 'sum'
        )

        helper = LayerHelper('sum', **locals())
        if dtype_flag:
            out = helper.create_variable_for_type_inference(dtype=dtype)
        else:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_sum',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out


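# Illustrative sketch, not part of the public API: the dtype promotion rule
# documented for sum() above, i.e. bool and int32 inputs accumulate in int64
# unless an explicit dtype is requested.
def _sum_dtype_demo():
    b = paddle.to_tensor([True, False, True])
    i = paddle.to_tensor([1, 2, 3], dtype='int32')
    assert paddle.sum(b).dtype == paddle.int64  # bool  -> int64
    assert paddle.sum(i).dtype == paddle.int64  # int32 -> int64
    # pass dtype= to accumulate in a different type instead
    assert paddle.sum(i, dtype='float64').dtype == paddle.float64

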
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
    """
    Replaces NaN, positive infinity, and negative infinity values in input tensor.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64.
        nan (float, optional): the value to replace NaNs with. Default is 0.
        posinf (float, optional): if a Number, the value to replace positive infinity values with. If None, positive infinity values are replaced with the greatest finite value representable by input’s dtype. Default is None.
        neginf (float, optional): if a Number, the value to replace negative infinity values with. If None, negative infinity values are replaced with the lowest finite value representable by input’s dtype. Default is None.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of nan_to_num operation input Tensor ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([float('nan'), 0.3, float('+inf'), float('-inf')], dtype='float32')
            >>> out1 = paddle.nan_to_num(x)
            >>> out1
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 0.                                      ,
              0.30000001                              ,
              340282346638528859811704183484516925440.,
             -340282346638528859811704183484516925440.])
            >>> out2 = paddle.nan_to_num(x, nan=1)
            >>> out2
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 1.                                      ,
              0.30000001                              ,
              340282346638528859811704183484516925440.,
             -340282346638528859811704183484516925440.])
            >>> out3 = paddle.nan_to_num(x, posinf=5)
            >>> out3
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 0.                                      ,
              0.30000001                              ,
              5.                                      ,
             -340282346638528859811704183484516925440.])
            >>> out4 = paddle.nan_to_num(x, nan=10, neginf=-99)
            >>> out4
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 10.                                    ,
              0.30000001                             ,
             340282346638528859811704183484516925440.,
             -99.                                    ])
    """
    # NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of python float number
    # incorrectly, so we have to explicitly construct tensors here
    posinf_value = paddle.full_like(x, float("+inf"))
    neginf_value = paddle.full_like(x, float("-inf"))
    nan = paddle.full_like(x, nan)
    assert x.dtype in [paddle.float32, paddle.float64]
    is_float32 = x.dtype == paddle.float32
    if posinf is None:
        posinf = (
            np.finfo(np.float32).max if is_float32 else np.finfo(np.float64).max
        )
    posinf = paddle.full_like(x, posinf)
    if neginf is None:
        neginf = (
            np.finfo(np.float32).min if is_float32 else np.finfo(np.float64).min
        )
    neginf = paddle.full_like(x, neginf)
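    # NaNs are replaced first; the equality tests below then only match
    # genuine +inf / -inf entries, since NaN never compares equal to anything.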
    x = paddle.where(paddle.isnan(x), nan, x)
    x = paddle.where(x == posinf_value, posinf, x)
    x = paddle.where(x == neginf_value, neginf, x)
    return x


@inplace_apis_in_dygraph_only
def nan_to_num_(x, nan=0.0, posinf=None, neginf=None, name=None):
    r"""
    Inplace version of ``nan_to_num`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_nan_to_num`.
    """
    # NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of python float number
    # incorrectly, so we have to explicitly construct tensors here
    posinf_value = paddle.full_like(x, float("+inf"))
    neginf_value = paddle.full_like(x, float("-inf"))
    nan = paddle.full_like(x, nan)
    assert x.dtype in [paddle.float32, paddle.float64]
    is_float32 = x.dtype == paddle.float32
    if posinf is None:
        posinf = (
            np.finfo(np.float32).max if is_float32 else np.finfo(np.float64).max
        )
    posinf = paddle.full_like(x, posinf)
    if neginf is None:
        neginf = (
            np.finfo(np.float32).min if is_float32 else np.finfo(np.float64).min
        )
    neginf = paddle.full_like(x, neginf)
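    # where_ writes into x in place and keeps x where the mask is True, so the
    # conditions below are inverted relative to nan_to_num() above.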
    x_not_nan = paddle.logical_not(paddle.isnan(x))
    x = paddle.where_(x_not_nan, x, nan)
    x = paddle.where_(x != posinf_value, x, posinf)
    x = paddle.where_(x != neginf_value, x, neginf)
    return x


def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
    """
    Computes the sum of tensor elements over the given axis, treating Not a Numbers (NaNs) as zero.

    Args:
        x (Tensor): An N-D Tensor, the data type is float16, float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the nansum is performed. If
            :attr:`None`, nansum all elements of :attr:`x` and return a
            Tensor with a single element, otherwise must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of summation operation on the specified axis of input Tensor `x`.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # x is a Tensor with following elements:
            >>> #    [[nan, 0.3, 0.5, 0.9]
            >>> #     [0.1, 0.2, -nan, 0.7]]
            >>> # Each example is followed by the corresponding output tensor.
            >>> x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, float('-nan'), 0.7]], dtype="float32")
            >>> out1 = paddle.nansum(x)
            >>> out1
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            2.69999981)
            >>> out2 = paddle.nansum(x, axis=0)
            >>> out2
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.10000000, 0.50000000, 0.50000000, 1.59999990])
            >>> out3 = paddle.nansum(x, axis=-1)
            >>> out3
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.70000005, 1.        ])
            >>> out4 = paddle.nansum(x, axis=1, keepdim=True)
            >>> out4
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1.70000005],
             [1.        ]])

            >>> # y is a Tensor with shape [2, 2, 2] and elements as below:
            >>> #      [[[1, nan], [3, 4]],
            >>> #       [[5, 6], [-nan, 8]]]
            >>> # Each example is followed by the corresponding output tensor.
            >>> y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
            ...                       [[5, 6], [float('-nan'), 8]]])
            >>> out5 = paddle.nansum(y, axis=[1, 2])
            >>> out5
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [8. , 19.])
            >>> out6 = paddle.nansum(y, axis=[0, 1])
            >>> out6
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [9. , 18.])
    """
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'nansum'
    )
    check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum')

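    # Replace every NaN with zero so the ordinary sum() below skips them.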
    zero_tensor = paddle.zeros_like(x)
    tmp_tensor = paddle.where(isnan(x), zero_tensor, x)
    return sum(tmp_tensor, axis, dtype, keepdim, name)


def nanmean(x, axis=None, keepdim=False, name=None):
    r"""
    Compute the arithmetic mean along the specified axis, ignoring NaNs.

    Args:
        x (Tensor): The input Tensor with data type uint16, float16, float32, float64.
        axis (int|list|tuple, optional):The axis along which to perform nanmean
            calculations. ``axis`` should be int, list(int) or tuple(int). If
            ``axis`` is a list/tuple of dimension(s), nanmean is calculated along
            all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``
            should be in range [-D, D), where D is the dimensions of ``x`` . If
            ``axis`` or element(s) of ``axis`` is less than 0, it works the
            same way as :math:`axis + D` . If ``axis`` is None, nanmean is
            calculated over all elements of ``x``. Default is None.
        keepdim (bool, optional): Whether to reserve the reduced dimension(s)
            in the output Tensor. If ``keepdim`` is True, the dimensions of
            the output Tensor is the same as ``x`` except in the reduced
            dimensions(it is of size 1 in this case). Otherwise, the shape of
            the output Tensor is squeezed in ``axis`` . Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of arithmetic mean along ``axis`` of ``x``, with the same data
        type as ``x``.

    Examples:

        .. code-block:: python
            :name: code-example1

            >>> import paddle
            >>> # x is a 2-D Tensor:
            >>> x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, float('-nan'), 0.7]])
            >>> out1 = paddle.nanmean(x)
            >>> out1
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            0.44999996)
            >>> out2 = paddle.nanmean(x, axis=0)
            >>> out2
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.10000000, 0.25000000, 0.50000000, 0.79999995])
            >>> out3 = paddle.nanmean(x, axis=0, keepdim=True)
            >>> out3
            Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.10000000, 0.25000000, 0.50000000, 0.79999995]])
            >>> out4 = paddle.nanmean(x, axis=1)
            >>> out4
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.56666666, 0.33333334])
            >>> out5 = paddle.nanmean(x, axis=1, keepdim=True)
            >>> out5
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.56666666],
             [0.33333334]])

            >>> # y is a 3-D Tensor:
            >>> y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
            ...                       [[5, 6], [float('-nan'), 8]]])
            >>> out6 = paddle.nanmean(y, axis=[1, 2])
            >>> out6
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [2.66666675, 6.33333349])
            >>> out7 = paddle.nanmean(y, axis=[0, 1])
            >>> out7
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [3., 6.])
    """
    if isinstance(axis, int):
        axis = [axis]
    check_variable_and_dtype(
        x, 'x/input', ['uint16', 'float16', 'float32', 'float64'], 'nanmean'
    )
    if axis is not None:
        check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean')

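    # nanmean = nansum / count of non-NaN entries along axis; the count is
    # cast to x.dtype so the division preserves the input dtype.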
    cnt = paddle.sum(~paddle.isnan(x), axis=axis, keepdim=keepdim)
    return paddle.divide(
        paddle.nansum(x, axis=axis, keepdim=keepdim, name=name),
        cnt.astype(x.dtype),
    )


def count_nonzero(x, axis=None, keepdim=False, name=None):
    r"""
    Counts the number of non-zero values in the tensor x along the specified axis.

    Args:
        x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`x` and return a
            Tensor with a single element, otherwise must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of count operation on the specified axis of input Tensor `x`, it's data type is `'int64'`.

    Examples:

        .. code-block:: python

            >>> import paddle
            >>> # x is a 2-D Tensor:
            >>> x = paddle.to_tensor([[0., 1.1, 1.2], [0., 0., 1.3], [0., 0., 0.]])
            >>> out1 = paddle.count_nonzero(x)
            >>> out1
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            3)
            >>> out2 = paddle.count_nonzero(x, axis=0)
            >>> out2
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 1, 2])
            >>> out3 = paddle.count_nonzero(x, axis=0, keepdim=True)
            >>> out3
            Tensor(shape=[1, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 2]])
            >>> out4 = paddle.count_nonzero(x, axis=1)
            >>> out4
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [2, 1, 0])
            >>> out5 = paddle.count_nonzero(x, axis=1, keepdim=True)
            >>> out5
            Tensor(shape=[3, 1], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[2],
             [1],
             [0]])

            >>> # y is a 3-D Tensor:
            >>> y = paddle.to_tensor([[[0., 1.1, 1.2], [0., 0., 1.3], [0., 0., 0.]],
            ...                         [[0., 2.5, 2.6], [0., 0., 2.4], [2.1, 2.2, 2.3]]])
            >>> out6 = paddle.count_nonzero(y, axis=[1, 2])
            >>> out6
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [3, 6])
            >>> out7 = paddle.count_nonzero(y, axis=[0, 1])
            >>> out7
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [1, 3, 5])
    """

    if isinstance(axis, int):
        axis = [axis]

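    # Casting to bool maps every non-zero entry to True; summing the int64
    # view of that mask counts the non-zero entries.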
    bool_tensor = paddle.cast(x, 'bool')
    int_tensor = paddle.cast(bool_tensor, 'int64')
    return paddle.sum(int_tensor, axis=axis, keepdim=keepdim, name=name)


@templatedoc(op_type="sum")
def add_n(inputs, name=None):
    """
    Sum one or more Tensors of the input.

    For example:

    .. code-block:: text

        Case 1:

            Input:
                input.shape = [2, 3]
                input = [[1, 2, 3],
                         [4, 5, 6]]

            Output:
                output.shape = [2, 3]
                output = [[1, 2, 3],
                          [4, 5, 6]]

        Case 2:

            Input:
                First input:
                    input1.shape = [2, 3]
                    input1 = [[1, 2, 3],
                              [4, 5, 6]]

                The second input:
                    input2.shape = [2, 3]
                    input2 = [[7, 8, 9],
                              [10, 11, 12]]

                Output:
                    output.shape = [2, 3]
                    output = [[8, 10, 12],
                              [14, 16, 18]]

    Args:
        inputs (Tensor|list[Tensor]|tuple[Tensor]):  A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.
            Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the sum of input :math:`inputs`, its shape and data types are consistent with :math:`inputs`.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
            >>> input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')
            >>> output = paddle.add_n([input0, input1])
            >>> output
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[8. , 10., 12.],
             [14., 16., 18.]])
    """
    if in_dynamic_mode():
        if isinstance(inputs, Variable):
            inputs = [inputs]
        return _C_ops.add_n(inputs)
    else:
        if paddle.ir.core._use_new_ir_api():
            return paddle._ir_ops.add_n(inputs)

        helper = LayerHelper('add_n', **locals())
        check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
        if isinstance(inputs, (list, tuple)):
            if len(inputs) > 0:
                for input in inputs:
                    check_variable_and_dtype(
                        input,
                        "inputs",
                        [
                            'float16',
                            'float32',
                            'float64',
                            'int32',
                            'int64',
                            'uint16',
                        ],
                        'add_n',
                    )
        else:
            check_variable_and_dtype(
                inputs,
                "inputs",
                ['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
                'add_n',
            )

        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype('inputs')
        )
        helper.append_op(
            type='sum',
            inputs={'X': inputs},
            outputs={'Out': out},
            attrs={'use_mkldnn': False},
        )

        return out


def trunc(input, name=None):
    '''
    This API is used to return a new tensor with the truncated integer values of the input.

    Args:
        input (Tensor): The input tensor, it's data type should be int32, int64, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor of trunc.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input = paddle.to_tensor([[0.1, 1.5], [-0.2, -2.4]], 'float32')
            >>> output = paddle.trunc(input)
            >>> output
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[ 0.,  1.],
             [-0., -2.]])
    '''
    if in_dynamic_mode():
        return _C_ops.trunc(input)
    else:
        inputs = {"X": input}
        attrs = {}

        helper = LayerHelper("trunc", **locals())
        check_variable_and_dtype(
            input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc'
        )
        out = helper.create_variable_for_type_inference(dtype=input.dtype)

        helper.append_op(
            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out


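# Illustrative sketch, not part of the public API: truncation toward zero
# expressed with public ops, consistent with the trunc() example above.
def _trunc_reference(x):
    # floor(|x|) drops the fractional part; multiplying by sign(x) restores
    # the sign (yielding -0. for small negative inputs, as shown above).
    return paddle.sign(x) * paddle.floor(paddle.abs(x))

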
@inplace_apis_in_dygraph_only
def trunc_(input, name=None):
    r"""
    Inplace version of ``trunc`` API, the output Tensor will be inplaced with the input ``input``.
    Please refer to :ref:`api_paddle_trunc`.
    """
    if in_dynamic_mode():
        return _C_ops.trunc_(input)


def mm(input, mat2, name=None):
    """

    Applies matrix multiplication to two tensors.

    Currently, the input tensors can be of any rank, but when the rank of
    either input is bigger than 3, the two inputs' ranks must be equal.


    Also note that if the raw tensor :math:`input` or :math:`mat2` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        input (Tensor): The first input Tensor for matrix multiplication.
        mat2 (Tensor): The second input Tensor for matrix multiplication.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The product Tensor.

    ::

        * example 1:

        input: [B, ..., M, K], mat2: [B, ..., K, N]
        out: [B, ..., M, N]

        * example 2:

        input: [B, M, K], mat2: [B, K, N]
        out: [B, M, N]

        * example 3:

        input: [B, M, K], mat2: [K, N]
        out: [B, M, N]

        * example 4:

        input: [M, K], mat2: [K, N]
        out: [M, N]

        * example 5:

        input: [B, M, K], mat2: [K]
        out: [B, M]

        * example 6:

        input: [K], mat2: [K]
        out: [1]

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
            >>> mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
            >>> out = paddle.mm(input, mat2)
            >>> out
            Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[11., 14., 17., 20.],
             [23., 30., 37., 44.],
             [35., 46., 57., 68.]])


    """
    if in_dynamic_mode():
        return _C_ops.matmul(input, mat2, False, False)
    else:

        def __check_input(x, y):
            var_names = {'x': x, 'y': y}
            for name, val in var_names.items():
                check_variable_and_dtype(
                    val, name, ['float16', 'float32', 'float64'], 'mm'
                )
            x_shape = list(x.shape)
            y_shape = list(y.shape)
            if len(x_shape) == 1:
                x_shape = [1] + x_shape
            if len(y_shape) == 1:
                y_shape = y_shape + [1]

            # check the inner 2 dimensions
            if x_shape[-1] != y_shape[-2]:
                if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
                    raise ValueError(
                        "After performing an optional transpose, Input X's width should be "
                        "equal to Y's width for multiplication "
                        "prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
                            x_shape, y_shape
                        )
                    )

            if len(y_shape) > 2 and len(x_shape) > 2:
                for i, dim_x in enumerate(x_shape[:-2]):
                    # don't check neg shape
                    if dim_x < 0 or y_shape[i] < 0:
                        continue
                    if dim_x != y_shape[i]:
                        raise ValueError(
                            "When the matrix is larger than 2 dimensions, the higher "
                            "dimensional values of the two matrices need to be equal. "
                            "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                            "Y's shape: %s.\n" % (i, i, x_shape, y_shape)
                        )

        __check_input(input, mat2)

        helper = LayerHelper('mm', **locals())
        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type='matmul_v2',
            inputs={'X': input, 'Y': mat2},
            outputs={'Out': out},
        )
        return out


def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
    """
    **addmm**

    Perform matrix multiplication for input $x$ and $y$.
    $input$ is added to the final result.
    The equation is:

    ..  math::
        Out = alpha * x * y + beta * input

    $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.

    Args:
        input (Tensor): The input Tensor to be added to the final result.
        x (Tensor): The first input Tensor for matrix multiplication.
        y (Tensor): The second input Tensor for matrix multiplication.
        beta (float, optional): Coefficient of $input$, default is 1.
        alpha (float, optional): Coefficient of $x*y$, default is 1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor of addmm.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.ones([2, 2])
            >>> y = paddle.ones([2, 2])
            >>> input = paddle.ones([2, 2])

            >>> out = paddle.addmm(input=input, x=x, y=y, beta=0.5, alpha=5.0)

            >>> print(out)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[10.50000000, 10.50000000],
             [10.50000000, 10.50000000]])
    """
    input_shape = input.shape
    x_shape = x.shape
    y_shape = y.shape
    if not len(x_shape) == len(y_shape) == 2:
        raise ValueError(
            "The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format(
                x_shape, y_shape
            )
        )
    if x_shape[1] != y_shape[0]:
        raise ValueError(
            "The input Variable x's width must be equal to Variable y's height. But received x's shape = {}, y's shape = {}.".format(
                x_shape, y_shape
            )
        )
    if len(input_shape) == 2:
        if input_shape[0] != x_shape[0]:
            if input_shape[0] != 1:
                raise ValueError(
                    "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(
                        input_shape[0]
                    )
                )
            if input_shape[1] != y_shape[1] and input_shape[1] != 1:
                raise ValueError(
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
        if input_shape[1] != y_shape[1]:
            if input_shape[1] != 1:
                raise ValueError(
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
    elif len(input_shape) == 1:
        if input_shape[0] not in (y_shape[1], 1):
            raise ValueError(
                "The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format(
                    input_shape, x_shape[0], y_shape[1]
                )
            )
    else:
        raise ValueError(
            "The dimention of input should be 2 or 1 but receive input's shape: {}".format(
                input_shape
            )
        )

    if in_dynamic_mode():
        return _C_ops.addmm(input, x, y, beta, alpha)
    else:
        inputs = {'Input': input, "X": x, "Y": y}
        attrs = {'Alpha': alpha, 'Beta': beta}

        helper = LayerHelper("addmm", **locals())
        check_variable_and_dtype(
            input, 'Input', ['float16', 'float32', 'float64', 'uint16'], 'addmm'
        )
        check_variable_and_dtype(
            x, 'X', ['float16', 'float32', 'float64', 'uint16'], 'addmm'
        )
        check_variable_and_dtype(
            y, 'Y', ['float16', 'float32', 'float64', 'uint16'], 'addmm'
        )
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out


@inplace_apis_in_dygraph_only
def addmm_(input, x, y, beta=1.0, alpha=1.0, name=None):
    """
    Inplace version of ``addmm`` API, the output Tensor will be inplaced with the input ``input``.
    Please refer to :ref:`api_paddle_addmm`.
    """
    input_shape = input.shape
    x_shape = x.shape
    y_shape = y.shape
    if not len(x_shape) == len(y_shape) == 2:
        raise ValueError(
            "The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format(
                x_shape, y_shape
            )
        )
    if x_shape[1] != y_shape[0]:
        raise ValueError(
            "The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(
                x_shape, y_shape
            )
        )
    if len(input_shape) == 2:
        if input_shape[0] != x_shape[0]:
            if input_shape[0] != 1:
                raise ValueError(
                    "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(
                        input_shape[0]
                    )
                )
            if input_shape[1] != y_shape[1] and input_shape[1] != 1:
                raise ValueError(
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
        if input_shape[1] != y_shape[1]:
            if input_shape[1] != 1:
                raise ValueError(
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
    elif len(input_shape) == 1:
        if input_shape[0] not in (y_shape[1], 1):
            raise ValueError(
                "The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format(
                    input_shape, x_shape[0], y_shape[1]
                )
            )
    else:
        raise ValueError(
            "The dimention of input should be 2 or 1 but receive input's shape: {}".format(
                input_shape
            )
        )

    if in_dynamic_mode():
        return _C_ops.addmm_(input, x, y, beta, alpha)


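# Illustrative sketch, not part of the public API: the addmm() formula
# Out = alpha * x * y + beta * input, composed from public ops.
def _addmm_reference(input, x, y, beta=1.0, alpha=1.0):
    return beta * input + alpha * paddle.matmul(x, y)

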
def renorm(x, p, axis, max_norm):
    """
    **renorm**

    This operator is used to calculate the p-norm along the given axis.
    Suppose the input shape on the axis dimension has the value T; the tensor
    is then split into T parts, and the p-norm is calculated for each part.
    If the p-norm of part i is larger than max_norm, each element
    in part i is re-normalized at the same scale so that part i's p-norm equals
    max_norm exactly; otherwise part i stays unchanged.

    Args:
        x (Tensor): The input Tensor
        p (float): The power of the norm operation.
        axis (int): the dimension to slice the tensor.
        max_norm (float): the maximal norm limit.

    Returns:
        Tensor: the renorm Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> input = [[[2.0, 2, -2], [3, 0.3, 3]],
            ...          [[2, -8, 2],   [3.1, 3.7, 3]]]
            >>> x = paddle.to_tensor(input,dtype='float32')
            >>> y = paddle.renorm(x, 1.0, 2, 2.05)
            >>> print(y)
            Tensor(shape=[2, 2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[[ 0.40594056,  0.29285714, -0.41000000],
              [ 0.60891086,  0.04392857,  0.61500001]],
             [[ 0.40594056, -1.17142856,  0.41000000],
              [ 0.62920785,  0.54178572,  0.61500001]]])

    """
    input_shape = x.shape
    if not axis < len(input_shape):
        raise ValueError(
            "the axis:{} should be less than the shape's size {}:{}".format(
                axis, len(input_shape), input_shape
            )
        )
    if not axis >= 0:
        if not axis >= -1 * len(input_shape):
            raise ValueError(
                "the axis:{} should not be less than -1 * length of input_shape:{}".format(
                    axis, -1 * len(input_shape)
                )
            )
        axis = axis + len(input_shape)
    if in_dynamic_mode():
        out = _C_ops.renorm(x, p, axis, max_norm)
        return out
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
        inputs = {'X': x}
        attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}

        helper = LayerHelper("renorm", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out


@inplace_apis_in_dygraph_only
def renorm_(x, p, axis, max_norm):
    """
    Inplace version of ``renorm`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_renorm`.
    """
    input_shape = x.shape
    if not axis < len(input_shape):
        raise ValueError(
            "the axis:{} should be less then the shape's size {}:{}".format(
                axis, len(input_shape), input_shape
            )
        )
    if not axis >= 0:
        if not axis >= -1 * len(input_shape):
            raise ValueError(
                "the axis:{} should not be less than -1 * length of input_shape:{}".format(
                    axis, -1 * len(input_shape)
                )
            )
        axis = axis + len(input_shape)
    if in_dynamic_mode():
        out = _C_ops.renorm_(x, p, axis, max_norm)
        return out


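# Illustrative sketch, not part of the public API: the renorm rule described
# above, written with public ops. Each slice taken along `axis` whose p-norm
# exceeds max_norm is rescaled so that its p-norm equals max_norm exactly.
# Assumes a non-negative `axis`, as renorm() itself normalizes above.
def _renorm_reference(x, p, axis, max_norm):
    other_dims = [d for d in range(x.ndim) if d != axis]
    # elementwise p-norm of every slice taken along `axis`
    norms = paddle.sum(paddle.abs(x) ** p, axis=other_dims, keepdim=True) ** (
        1.0 / p
    )
    scale = paddle.where(
        norms > max_norm, max_norm / norms, paddle.ones_like(norms)
    )
    return x * scale

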
def inner(x, y, name=None):
    """

    Inner product of two input Tensors.

    Ordinary inner product for 1-D Tensors, in higher dimensions a sum product over the last axes.

    Args:
        x (Tensor): An N-D Tensor or a Scalar Tensor. If it's not a scalar Tensor, its last dimensions must match y's.
        y (Tensor): An N-D Tensor or a Scalar Tensor. If it's not a scalar Tensor, its last dimensions must match x's.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The inner-product Tensor, the output shape is x.shape[:-1] + y.shape[:-1].

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.arange(1, 7).reshape((2, 3)).astype('float32')
            >>> y = paddle.arange(1, 10).reshape((3, 3)).astype('float32')
            >>> out = paddle.inner(x, y)
            >>> print(out)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[14. , 32. , 50. ],
             [32. , 77. , 122.]])


    """
    if x.size == 1 or y.size == 1:
        return multiply(x, y)
    else:
        xshape = x.shape
        yshape = y.shape
        dstshape = list(xshape[:-1]) + list(yshape[:-1])

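        # Collapse all leading dimensions so the sum-product over the last
        # axis becomes a single 2-D matmul against ny transposed.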
        nx = x.reshape((-1, xshape[-1]))
        ny = y.reshape((-1, yshape[-1]))

        if in_dynamic_mode():
            return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape)
        else:

            def __check_input(x, y):
                var_names = {'x': x, 'y': y}
                for name, val in var_names.items():
                    check_variable_and_dtype(
                        val, name, ['float16', 'float32', 'float64'], 'inner'
                    )
                x_shape = list(xshape)
                y_shape = list(yshape)

                # check the inner 2 dimensions
                if x_shape[-1] != y_shape[-1]:
                    if not ((x_shape[-1] == -1) or (y_shape[-1] == -1)):
                        raise ValueError(
                            "After performing an optional transpose, Input X's last dim should be "
                            "equal to Y's last dim for multiplication "
                            "prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
                                x_shape, y_shape
                            )
                        )

            __check_input(nx, ny)

            helper = LayerHelper('inner', **locals())
            out = helper.create_variable_for_type_inference(dtype=nx.dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': nx, 'Y': ny.T},
                outputs={'Out': out},
            )
            return out.reshape(dstshape)


def outer(x, y, name=None):
    """

    Outer product of two Tensors.

    Input is flattened if not already 1-dimensional.

    Args:
        x (Tensor): An N-D Tensor or a Scalar Tensor.
        y (Tensor): An N-D Tensor or a Scalar Tensor.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The outer-product Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.arange(1, 4).astype('float32')
            >>> y = paddle.arange(1, 6).astype('float32')
            >>> out = paddle.outer(x, y)
            >>> print(out)
            Tensor(shape=[3, 5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1. , 2. , 3. , 4. , 5. ],
             [2. , 4. , 6. , 8. , 10.],
             [3. , 6. , 9. , 12., 15.]])


    """
    nx = x.reshape((-1, 1))
    ny = y.reshape((1, -1))

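    # A column vector (n, 1) multiplied with a row vector (1, m) via matmul
    # yields exactly the (n, m) outer product.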
    if in_dynamic_mode():
        return _C_ops.matmul(nx, ny, False, False)
    else:

        def __check_input(x, y):
            var_names = {'x': x, 'y': y}
            for name, val in var_names.items():
                check_variable_and_dtype(
                    val,
                    name,
                    ['float16', 'float32', 'float64', 'int32', 'int64'],
                    'outer',
                )

        __check_input(nx, ny)

        helper = LayerHelper('outer', **locals())
        out = helper.create_variable_for_type_inference(dtype=nx.dtype)
        helper.append_op(
            type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out}
        )
        return out


def logsumexp(x, axis=None, keepdim=False, name=None):
    r"""
    Calculates the log of the sum of exponentials of ``x`` along ``axis`` .

    .. math::
       logsumexp(x) = \log \sum \exp(x)

    Args:
        x (Tensor): The input Tensor with data type float16, float32 or float64, which
            has no more than 4 dimensions.
        axis (int|list|tuple, optional): The axis along which to perform
            logsumexp calculations. ``axis`` should be int, list(int) or
            tuple(int). If ``axis`` is a list/tuple of dimension(s), logsumexp
            is calculated along all element(s) of ``axis`` . ``axis`` or
            element(s) of ``axis`` should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is
            less than 0, it works the same way as :math:`axis + D` . If
            ``axis`` is None, logsumexp is calculated along all elements of
            ``x``. Default is None.
        keepdim (bool, optional): Whether to reserve the reduced dimension(s)
            in the output Tensor. If ``keep_dim`` is True, the dimensions of
            the output Tensor is the same as ``x`` except in the reduced
            dimensions(it is of size 1 in this case). Otherwise, the shape of
            the output Tensor is squeezed in ``axis`` . Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of logsumexp along ``axis`` of ``x``, with the same data
        type as ``x``.

    Examples:

    .. code-block:: python

        >>> import paddle

        >>> x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])
        >>> out1 = paddle.logsumexp(x)
        >>> out1
        Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
        3.46912265)
        >>> out2 = paddle.logsumexp(x, 1)
        >>> out2
        Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
        [2.15317822, 3.15684605])

    """
    reduce_all, axis = _get_reduce_axis(axis, x)

    if in_dynamic_mode():
        return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'logsumexp'
        )

        helper = LayerHelper('logsumexp', **locals())
        attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
2640
        )
2641
        return out
2642

S
swtkiwi 已提交
2643

2644 2645
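# Illustrative sketch, not part of the public API: one standard numerically
# stable evaluation of the formula above, rewriting log(sum(exp(x))) as
# m + log(sum(exp(x - m))) with m = max(x) so that exp() cannot overflow.
def _logsumexp_reference(x, axis=None, keepdim=False):
    m = paddle.max(x, axis=axis, keepdim=True)
    out = m + paddle.log(
        paddle.sum(paddle.exp(x - m), axis=axis, keepdim=True)
    )
    return out if keepdim else paddle.squeeze(out, axis=axis)

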
def inverse(x, name=None):
    """
2646 2647 2648 2649 2650
    Takes the inverse of the square matrix. A square matrix is a matrix with
    the same number of rows and columns. The input can be a square matrix
    (2-D Tensor) or batches of square matrices.

    Args:
        x (Tensor): The input tensor. The last two
            dimensions should be equal. When the number of dimensions is
            greater than 2, it is treated as batches of square matrix. The data
            type can be float32 and float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor that holds the inverse of x. The shape and data type
        are the same as x.

    Examples:
        .. code-block:: python

2664
            >>> import paddle
2665

2666 2667 2668 2669 2670 2671
            >>> mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
            >>> inv = paddle.inverse(mat)
            >>> print(inv)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.50000000, 0.        ],
             [0.        , 0.50000000]])
2672 2673

    """
2674
    if in_dynamic_mode():
W
wanghuancoder 已提交
2675
        return _C_ops.inverse(x)
2676
    else:
2677

2678 2679 2680 2681 2682 2683 2684 2685
        def _check_input(x):
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
            if len(x.shape) < 2:
                raise ValueError(
                    "The input of inverse is expected to be a Tensor whose number "
                    "of dimensions is no less than 2. But reviced: %d, "
                    "x's shape: %s." % (len(x.shape), x.shape)
                )
2686

2687 2688 2689 2690 2691 2692 2693
        _check_input(x)
        helper = LayerHelper('inverse', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]}
        )
        return out
2694

2695

2696
def max(x, axis=None, keepdim=False, name=None):
2697
    """
S
swtkiwi 已提交
2698

2699
    Computes the maximum of tensor elements over the given axis.
2700

T
Tao Luo 已提交
2701 2702
    Note:
        The difference between max and amax is: If there are multiple maximum elements,
2703
        amax evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2704 2705 2706
        while max propagates gradient to all of them.


2707
    Args:
2708 2709
        x (Tensor): A tensor, the data type is float32, float64, int32, int64.
        axis (int|list|tuple, optional): The axis along which the maximum is computed.
2710
            If :attr:`None`, compute the maximum over all elements of
N
Noel 已提交
2711
            `x` and return a Tensor with a single element,
2712 2713
            otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
2714
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
2715
            output Tensor. The result tensor will have one fewer dimension
2716
            than the `x` unless :attr:`keepdim` is true, default
2717
            value is False.
2718
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2719 2720

    Returns:
2721
        Tensor, results of maximum on the specified axis of input tensor,
2722
        it's data type is the same as `x`.
2723 2724 2725

    Examples:
        .. code-block:: python
2726

            >>> import paddle

            >>> # data_x is a Tensor with shape [2, 4]
            >>> # the axis is a int element
            >>> x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, 0.6, 0.7]],
            ...                       dtype='float64', stop_gradient=False)
            >>> result1 = paddle.max(x)
            >>> result1.backward()
            >>> result1
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.90000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 0., 0., 1.],
             [0., 0., 0., 0.]])

            >>> x.clear_grad()
            >>> result2 = paddle.max(x, axis=0)
            >>> result2.backward()
            >>> result2
            Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.20000000, 0.30000000, 0.60000000, 0.90000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[1., 1., 0., 1.],
             [0., 0., 1., 0.]])

            >>> x.clear_grad()
            >>> result3 = paddle.max(x, axis=-1)
            >>> result3.backward()
            >>> result3
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.90000000, 0.70000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 0., 0., 1.],
             [0., 0., 0., 1.]])

            >>> x.clear_grad()
            >>> result4 = paddle.max(x, axis=1, keepdim=True)
            >>> result4.backward()
            >>> result4
            Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.90000000],
             [0.70000000]])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 0., 0., 1.],
             [0., 0., 0., 1.]])

            >>> # data_y is a Tensor with shape [2, 2, 2]
            >>> # the axis is list
            >>> y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
            ...                         [[5.0, 6.0], [7.0, 8.0]]],
            ...                         dtype='float64', stop_gradient=False)
            >>> result5 = paddle.max(y, axis=[1, 2])
            >>> result5.backward()
            >>> result5
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [4., 8.])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0., 0.],
              [0., 1.]],
             [[0., 0.],
              [0., 1.]]])

            >>> y.clear_grad()
            >>> result6 = paddle.max(y, axis=[0, 1])
            >>> result6.backward()
            >>> result6
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [7., 8.])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0., 0.],
              [0., 0.]],
             [[0., 0.],
              [1., 1.]]])
    """

    if in_dynamic_mode():
        return _C_ops.max(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        helper = LayerHelper('max', **locals())
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'max',
        )
        if not isinstance(axis, Variable) and paddle.utils._contain_var(axis):
            axis = paddle.utils._convert_to_tensor_list(axis)

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_max',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out


def min(x, axis=None, keepdim=False, name=None):
    """

    Computes the minimum of tensor elements over the given axis.

    Note:
        The difference between min and amin is: If there are multiple minimum elements,
        amin evenly distributes gradient between these equal values,
        while min propagates gradient to all of them.

    Args:
        x (Tensor): A tensor, the data type is float32, float64, int32, int64.
        axis (int|list|tuple, optional): The axis along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than `x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of minimum on the specified axis of input tensor,
        its data type is the same as the input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # data_x is a Tensor with shape [2, 4]
            >>> # the axis is a int element
            >>> x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, 0.6, 0.7]],
            ...                       dtype='float64', stop_gradient=False)
            >>> result1 = paddle.min(x)
            >>> result1.backward()
            >>> result1
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.10000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 0., 0., 0.],
             [1., 0., 0., 0.]])

            >>> x.clear_grad()
            >>> result2 = paddle.min(x, axis=0)
            >>> result2.backward()
            >>> result2
            Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.10000000, 0.20000000, 0.50000000, 0.70000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 0., 1., 0.],
             [1., 1., 0., 1.]])

            >>> x.clear_grad()
            >>> result3 = paddle.min(x, axis=-1)
            >>> result3.backward()
            >>> result3
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.20000000, 0.10000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[1., 0., 0., 0.],
             [1., 0., 0., 0.]])

            >>> x.clear_grad()
            >>> result4 = paddle.min(x, axis=1, keepdim=True)
            >>> result4.backward()
            >>> result4
            Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.20000000],
             [0.10000000]])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[1., 0., 0., 0.],
             [1., 0., 0., 0.]])

            >>> # data_y is a Tensor with shape [2, 2, 2]
            >>> # the axis is list
            >>> y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
            ...                       [[5.0, 6.0], [7.0, 8.0]]],
            ...                       dtype='float64', stop_gradient=False)
            >>> result5 = paddle.min(y, axis=[1, 2])
            >>> result5.backward()
            >>> result5
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [1., 5.])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[1., 0.],
              [0., 0.]],
             [[1., 0.],
              [0., 0.]]])

            >>> y.clear_grad()
            >>> result6 = paddle.min(y, axis=[0, 1])
            >>> result6.backward()
            >>> result6
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [1., 2.])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[1., 1.],
              [0., 0.]],
             [[0., 0.],
              [0., 0.]]])
    """

    if in_dynamic_mode():
        return _C_ops.min(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        helper = LayerHelper('min', **locals())
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'min',
        )

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_min',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out


def amax(x, axis=None, keepdim=False, name=None):
    """
    Computes the maximum of tensor elements over the given axis.

    Note:
        The difference between max and amax is: If there are multiple maximum elements,
        amax evenly distributes gradient between these equal values,
        while max propagates gradient to all of them.

    Args:
        x (Tensor): A tensor, the data type is float32, float64, int32, int64,
            the dimension is no more than 4.
        axis (int|list|tuple, optional): The axis along which the maximum is computed.
            If :attr:`None`, compute the maximum over all elements of
            `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than `x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of maximum on the specified axis of input tensor,
        its data type is the same as `x`.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> # data_x is a Tensor with shape [2, 4] with multiple maximum elements
            >>> # the axis is a int element

            >>> x = paddle.to_tensor([[0.1, 0.9, 0.9, 0.9],
            ...                         [0.9, 0.9, 0.6, 0.7]],
            ...                         dtype='float64', stop_gradient=False)
            >>> # There are 5 maximum elements:
            >>> # 1) amax evenly distributes gradient between these equal values,
            >>> #    thus the corresponding gradients are 1/5=0.2;
            >>> # 2) while max propagates gradient to all of them,
            >>> #    thus the corresponding gradient are 1.
            >>> result1 = paddle.amax(x)
            >>> result1.backward()
            >>> result1
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.90000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.20000000, 0.20000000, 0.20000000],
             [0.20000000, 0.20000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result1_max = paddle.max(x)
            >>> result1_max.backward()
            >>> result1_max
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.90000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 1., 1., 1.],
             [1., 1., 0., 0.]])

            >>> x.clear_grad()
            >>> result2 = paddle.amax(x, axis=0)
            >>> result2.backward()
            >>> result2
            Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.90000000, 0.90000000, 0.90000000, 0.90000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.50000000, 1.        , 1.        ],
             [1.        , 0.50000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result3 = paddle.amax(x, axis=-1)
            >>> result3.backward()
            >>> result3
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.90000000, 0.90000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.33333333, 0.33333333, 0.33333333],
             [0.50000000, 0.50000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result4 = paddle.amax(x, axis=1, keepdim=True)
            >>> result4.backward()
            >>> result4
            Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.90000000],
             [0.90000000]])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.33333333, 0.33333333, 0.33333333],
             [0.50000000, 0.50000000, 0.        , 0.        ]])

            >>> # data_y is a Tensor with shape [2, 2, 2]
            >>> # the axis is list
            >>> y = paddle.to_tensor([[[0.1, 0.9], [0.9, 0.9]],
            ...                         [[0.9, 0.9], [0.6, 0.7]]],
            ...                         dtype='float64', stop_gradient=False)
            >>> result5 = paddle.amax(y, axis=[1, 2])
            >>> result5.backward()
            >>> result5
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.90000000, 0.90000000])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0.        , 0.33333333],
              [0.33333333, 0.33333333]],
             [[0.50000000, 0.50000000],
              [0.        , 0.        ]]])

            >>> y.clear_grad()
            >>> result6 = paddle.amax(y, axis=[0, 1])
            >>> result6.backward()
            >>> result6
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.90000000, 0.90000000])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0.        , 0.33333333],
              [0.50000000, 0.33333333]],
             [[0.50000000, 0.33333333],
              [0.        , 0.        ]]])
    """
    if in_dynamic_mode():
        return _C_ops.amax(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        helper = LayerHelper('amax', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
        )

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_amax',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out


def amin(x, axis=None, keepdim=False, name=None):
    """

    Computes the minimum of tensor elements over the given axis.

    Note:
        The difference between min and amin is: If there are multiple minimum elements,
        amin evenly distributes gradient between these equal values,
        while min propagates gradient to all of them.

    Args:
        x (Tensor): A tensor, the data type is float32, float64, int32, int64,
            the dimension is no more than 4.
        axis (int|list|tuple, optional): The axis along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than `x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of minimum on the specified axis of input tensor,
        its data type is the same as the input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> # data_x is a Tensor with shape [2, 4] with multiple minimum elements
            >>> # the axis is a int element

            >>> x = paddle.to_tensor([[0.2, 0.1, 0.1, 0.1],
            ...                         [0.1, 0.1, 0.6, 0.7]],
            ...                         dtype='float64', stop_gradient=False)
            >>> # There are 5 minimum elements:
            >>> # 1) amin evenly distributes gradient between these equal values,
            >>> #    thus the corresponding gradients are 1/5=0.2;
            >>> # 2) while min propagates gradient to all of them,
            >>> #    thus the corresponding gradient are 1.
            >>> result1 = paddle.amin(x)
            >>> result1.backward()
            >>> result1
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.10000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.20000000, 0.20000000, 0.20000000],
             [0.20000000, 0.20000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result1_min = paddle.min(x)
            >>> result1_min.backward()
            >>> result1_min
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
            0.10000000)
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0., 1., 1., 1.],
             [1., 1., 0., 0.]])

            >>> x.clear_grad()
            >>> result2 = paddle.amin(x, axis=0)
            >>> result2.backward()
            >>> result2
            Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.10000000, 0.10000000, 0.10000000, 0.10000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.50000000, 1.        , 1.        ],
             [1.        , 0.50000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result3 = paddle.amin(x, axis=-1)
            >>> result3.backward()
            >>> result3
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.10000000, 0.10000000])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.33333333, 0.33333333, 0.33333333],
             [0.50000000, 0.50000000, 0.        , 0.        ]])

            >>> x.clear_grad()
            >>> result4 = paddle.amin(x, axis=1, keepdim=True)
            >>> result4.backward()
            >>> result4
            Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.10000000],
             [0.10000000]])
            >>> x.grad
            Tensor(shape=[2, 4], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[0.        , 0.33333333, 0.33333333, 0.33333333],
             [0.50000000, 0.50000000, 0.        , 0.        ]])

            >>> # data_y is a Tensor with shape [2, 2, 2]
            >>> # the axis is list
            >>> y = paddle.to_tensor([[[0.2, 0.1], [0.1, 0.1]],
            ...                       [[0.1, 0.1], [0.6, 0.7]]],
            ...                       dtype='float64', stop_gradient=False)
            >>> result5 = paddle.amin(y, axis=[1, 2])
            >>> result5.backward()
            >>> result5
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.10000000, 0.10000000])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0.        , 0.33333333],
              [0.33333333, 0.33333333]],
             [[0.50000000, 0.50000000],
              [0.        , 0.        ]]])

            >>> y.clear_grad()
            >>> result6 = paddle.amin(y, axis=[0, 1])
            >>> result6.backward()
            >>> result6
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [0.10000000, 0.10000000])
            >>> y.grad
            Tensor(shape=[2, 2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
            [[[0.        , 0.33333333],
              [0.50000000, 0.33333333]],
             [[0.50000000, 0.33333333],
              [0.        , 0.        ]]])
    """
    if in_dynamic_mode():
        return _C_ops.amin(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        helper = LayerHelper('amin', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
        )

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_amin',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out


def log1p(x, name=None):
    r"""
    Calculates the natural log of one plus the given input tensor, element-wise.

    .. math::
        Out = \ln(x+1)

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: int32, int64, float16, bfloat16, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the natural log of one plus the input Tensor, computed element-wise.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.to_tensor([[0], [1]], dtype='float32')
            >>> res = paddle.log1p(data)
            >>> res
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.        ],
             [0.69314718]])
    """

    if in_dynamic_mode():
        return _C_ops.log1p(x)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'float16', 'uint16', 'float32', 'float64'],
            "log1p",
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log1p', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
        return out


@inplace_apis_in_dygraph_only
def log1p_(x, name=None):
    r"""
    Inplace version of ``log1p`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_log1p`.
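
    Examples:
        A minimal usage sketch (illustrative; the printed output assumes the
        default CPU place and float32 dtype):

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0.0, 1.0])
            >>> out = paddle.log1p_(x)  # modifies ``x`` in place and returns it
            >>> x
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.        , 0.69314718])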
    """

    if in_dynamic_mode():
        return _C_ops.log1p_(x)


def log2(x, name=None):
    r"""
    Calculates the log to the base 2 of the given input tensor, element-wise.

    .. math::

        Out = \log_2 x

    Args:
        x (Tensor): Input tensor must be one of the following types: int32, int64, float16, bfloat16, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The log to the base 2 of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> # example 1: x is a float
            >>> x_i = paddle.to_tensor([[1.0], [2.0]])
            >>> res = paddle.log2(x_i)
            >>> res
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.],
             [1.]])

            >>> # example 2: x is float32
            >>> x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')
            >>> res = paddle.log2(x_i)
            >>> res
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.])

            >>> # example 3: x is float64
            >>> x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')
            >>> res = paddle.log2(x_i)
            >>> res
            Tensor(shape=[1], dtype=float64, place=Place(cpu), stop_gradient=True,
            [1.])
    """
    if in_dynamic_mode():
        return _C_ops.log2(x)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'float16', 'uint16', 'float32', 'float64'],
            "log2",
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log2', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
        return out


@inplace_apis_in_dygraph_only
def log2_(x, name=None):
    r"""
    Inplace version of ``log2`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_log2`.
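
    Examples:
        A minimal usage sketch (illustrative; the printed output assumes the
        default CPU place and float32 dtype):

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1.0, 2.0, 4.0])
            >>> out = paddle.log2_(x)  # modifies ``x`` in place and returns it
            >>> x
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0., 1., 2.])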
    """

    if in_dynamic_mode():
        return _C_ops.log2_(x)


def log10(x, name=None):
    r"""
    Calculates the log to the base 10 of the given input tensor, element-wise.

    .. math::

        Out = \log_{10} x

    Args:
        x (Tensor): Input tensor must be one of the following types: int32, int64, float16, bfloat16, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The log to the base 10 of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> # example 1: x is a float
            >>> x_i = paddle.to_tensor([[1.0], [10.0]])
            >>> res = paddle.log10(x_i)
            >>> res
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.],
             [1.]])

            >>> # example 2: x is float32
            >>> x_i = paddle.full(shape=[1], fill_value=10, dtype='float32')
            >>> res = paddle.log10(x_i)
            >>> res
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.])

            >>> # example 3: x is float64
            >>> x_i = paddle.full(shape=[1], fill_value=10, dtype='float64')
            >>> res = paddle.log10(x_i)
            >>> res
            Tensor(shape=[1], dtype=float64, place=Place(cpu), stop_gradient=True,
            [1.])
    """
    if in_dynamic_mode():
        return _C_ops.log10(x)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'float16', 'uint16', 'float32', 'float64'],
            "log10",
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log10', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
        return out


@inplace_apis_in_dygraph_only
def log10_(x, name=None):
    r"""
    Inplace version of ``log10`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_log10`.
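
    Examples:
        A minimal usage sketch (illustrative; the printed output assumes the
        default CPU place and float32 dtype):

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1.0, 10.0, 100.0])
            >>> out = paddle.log10_(x)  # modifies ``x`` in place and returns it
            >>> x
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0., 1., 2.])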
    """

    if in_dynamic_mode():
        return _C_ops.log10_(x)


def clip(x, min=None, max=None, name=None):
    """
    This operator clips all elements in the input into the range [min, max] and returns
    a resulting tensor as the following equation:

    .. math::

        Out = MIN(MAX(x, min), max)

    Args:
        x (Tensor): An N-D Tensor with data type float16, float32, float64, int32 or int64.
        min (float|int|Tensor, optional): The lower bound with type ``float`` , ``int`` or a ``0-D Tensor``
            with shape [] and type ``int32``, ``float16``, ``float32``, ``float64``.
        max (float|int|Tensor, optional): The upper bound with type ``float``, ``int`` or a ``0-D Tensor``
            with shape [] and type ``int32``, ``float16``, ``float32``, ``float64``.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type and data shape as input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
            >>> out1 = paddle.clip(x1, min=3.5, max=5.0)
            >>> out1
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[3.50000000, 3.50000000],
             [4.50000000, 5.        ]])
            >>> out2 = paddle.clip(x1, min=2.5)
            >>> out2
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[2.50000000, 3.50000000],
             [4.50000000, 6.40000010]])
    """

    x_dtype = str(x.dtype)
    # Default bounds used when min/max are not given; the integer bounds back
    # off from the exact iinfo limits, presumably to leave headroom when the
    # bound is later handled as a floating-point attribute.
    if x_dtype == 'paddle.int32':
        min_ = np.iinfo(np.int32).min
        max_ = np.iinfo(np.int32).max - 2**7
    elif x_dtype == 'paddle.int64':
        min_ = np.iinfo(np.int64).min
        max_ = np.iinfo(np.int64).max - 2**39
    elif x_dtype == 'paddle.float16':
        min_ = float(np.finfo(np.float16).min)
        max_ = float(np.finfo(np.float16).max)
    else:
        min_ = float(np.finfo(np.float32).min)
        max_ = float(np.finfo(np.float32).max)

    if in_dynamic_mode():
        if isinstance(min, Variable):
            min = min.item(0)
        if isinstance(max, Variable):
            max = max.item(0)
        min = min_ if min is None else min
        max = max_ if max is None else max
        return _C_ops.clip(x, min, max)
    else:
        if min is not None:
            check_type(min, 'min', (float, int, Variable), 'clip')
            if isinstance(min, Variable):
                check_dtype(
                    min.dtype,
                    'min',
                    ['float16', 'float32', 'float64', 'int32', 'uint16'],
                    'clip',
                    '(When the type of min in clip is Variable.)',
                )
        if max is not None:
            check_type(max, 'max', (float, int, Variable), 'clip')
            if isinstance(max, Variable):
                check_dtype(
                    max.dtype,
                    'max',
                    ['float16', 'float32', 'float64', 'int32', 'uint16'],
                    'clip',
                    '(When the type of max in clip is Variable.)',
                )

        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
            'clip',
        )

        inputs = {'X': x}
        attrs = {'min': min_, 'max': max_}

        if isinstance(min, Variable):
            min.stop_gradient = True
            inputs['Min'] = min
        elif min is not None:
            attrs['min'] = min

        if isinstance(max, Variable):
            max.stop_gradient = True
            inputs['Max'] = max
        elif max is not None:
            attrs['max'] = max

        helper = LayerHelper('clip', **locals())
        output = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype('x')
        )
        helper.append_op(
            type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
        )

        return output


@inplace_apis_in_dygraph_only
def clip_(x, min=None, max=None, name=None):
    """
    Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_clip`.
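
    Examples:
        A minimal usage sketch (illustrative; the printed output assumes the
        default CPU place and float32 dtype):

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1.2, 3.5, 4.5, 6.4])
            >>> out = paddle.clip_(x, min=3.5, max=5.0)  # modifies ``x`` in place
            >>> x
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [3.50000000, 3.50000000, 4.50000000, 5.        ])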
    """
    fmin = float(np.finfo(np.float32).min)
    fmax = float(np.finfo(np.float32).max)
    if isinstance(min, Variable):
        min = min.item(0)
    if isinstance(max, Variable):
        max = max.item(0)
    min = fmin if min is None else min
    max = fmax if max is None else max

    if in_dynamic_mode():
        return _C_ops.clip_(x, min, max)


def trace(x, offset=0, axis1=0, axis2=1, name=None):
    """

    Computes the sum along diagonals of the input tensor x.

    If ``x`` is 2D, returns the sum of the diagonal.

    If ``x`` has larger dimensions, returns a tensor of diagonal sums, with diagonals taken from
    the 2D planes specified by axis1 and axis2. By default, the 2D planes are formed by the first and second axes
    of the input tensor x.

    The argument ``offset`` determines where diagonals are taken from input tensor x:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.
    - Note that if offset is outside of input's shape indicated by axis1 and axis2, 0 will be returned.

    Args:
        x (Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
        offset (int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
        axis1 (int, optional): The first axis with respect to take diagonal. Default: 0.
        axis2 (int, optional): The second axis with respect to take diagonal. Default: 1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: the output data type is the same as input data type.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> case1 = paddle.randn([2, 3])
            >>> case2 = paddle.randn([3, 10, 10])
            >>> case3 = paddle.randn([3, 10, 5, 10])
            >>> data1 = paddle.trace(case1)
            >>> data1.shape
            []
            >>> data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2)
            >>> data2.shape
            [3]
            >>> data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1)
            >>> data3.shape
            [3, 5]

    """

    def __check_input(x, offset, axis1, axis2):
        check_dtype(
            x.dtype,
            'Input',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'trace',
        )

        input_shape = list(x.shape)
        assert len(input_shape) >= 2, (
            "The x must be at least 2-dimensional, "
            "But received Input x's dimensional: %s.\n" % len(input_shape)
        )

        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

        assert (0 <= axis1_) and (axis1_ < len(input_shape)), (
            "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
            % (-(len(input_shape)), len(input_shape) - 1, axis1)
        )

        assert (0 <= axis2_) and (axis2_ < len(input_shape)), (
            "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
            % (-(len(input_shape)), len(input_shape) - 1, axis2)
        )

        assert axis1_ != axis2_, (
            "axis1 and axis2 cannot be the same axis."
            "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
        )

    if in_dynamic_mode():
        return _C_ops.trace(x, offset, axis1, axis2)
    else:
        __check_input(x, offset, axis1, axis2)

        helper = LayerHelper('trace', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type='trace',
            inputs={'Input': [x]},
            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
            outputs={'Out': [out]},
        )
        return out


def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
    """
    Computes the diagonals of the input tensor x.

    If ``x`` is 2D, returns the diagonal.
    If ``x`` has larger dimensions, diagonals are taken from the 2D planes specified by axis1 and axis2.
    By default, the 2D planes are formed by the first and second axes of the input tensor x.

    The argument ``offset`` determines where diagonals are taken from input tensor x:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.

    Args:
        x (Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be bool, int32, int64, float16, float32, float64.
        offset (int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
        axis1 (int, optional): The first axis with respect to take diagonal. Default: 0.
        axis2 (int, optional): The second axis with respect to take diagonal. Default: 1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: a partial view of the input tensor in the specified two dimensions; the output data type is the same as the input data type.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> paddle.seed(2023)
            >>> x = paddle.rand([2, 2, 3],'float32')
            >>> print(x)
            Tensor(shape=[2, 2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[[0.86583614, 0.52014720, 0.25960937],
              [0.90525323, 0.42400089, 0.40641287]],
             [[0.97020894, 0.74437362, 0.51785129],
              [0.73292869, 0.97786582, 0.04315904]]])

            >>> out1 = paddle.diagonal(x)
            >>> print(out1)
            Tensor(shape=[3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.86583614, 0.73292869],
             [0.52014720, 0.97786582],
             [0.25960937, 0.04315904]])

            >>> out2 = paddle.diagonal(x, offset=0, axis1=2, axis2=1)
            >>> print(out2)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.86583614, 0.42400089],
             [0.97020894, 0.97786582]])

            >>> out3 = paddle.diagonal(x, offset=1, axis1=0, axis2=1)
            >>> print(out3)
            Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.90525323],
             [0.42400089],
             [0.40641287]])

            >>> out4 = paddle.diagonal(x, offset=0, axis1=1, axis2=2)
            >>> print(out4)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.86583614, 0.42400089],
             [0.97020894, 0.97786582]])

    """
    if in_dynamic_mode():
        return _C_ops.diagonal(x, offset, axis1, axis2)
    else:

        def __check_input(x, offset, axis1, axis2):
            check_dtype(
                x.dtype,
                'Input',
                [
                    'bool',
                    'int32',
                    'int64',
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                ],
                'diagonal',
            )

            input_shape = list(x.shape)
            assert len(input_shape) >= 2, (
                "The x must be at least 2-dimensional, "
                "But received Input x's dimensional: %s.\n" % len(input_shape)
            )

            axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
            axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

            assert axis1_ < len(input_shape), (
                "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                % (-(len(input_shape)), len(input_shape) - 1, axis1)
            )

            assert axis2_ < len(input_shape), (
                "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                % (-(len(input_shape)), len(input_shape) - 1, axis2)
            )

            assert axis1_ != axis2_, (
                "axis1 and axis2 cannot be the same axis."
                "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
            )

        __check_input(x, offset, axis1, axis2)
        helper = LayerHelper('diagonal', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type='diagonal',
            inputs={'Input': [x]},
            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
            outputs={'Out': [out]},
        )
        return out


def kron(x, y, name=None):
    r"""
    Compute the Kronecker product of two tensors, a
    composite tensor made of blocks of the second tensor scaled by the
    first.

    Assume that the ranks of the two tensors, $X$ and $Y$,
    are the same, prepending ones to the smaller one if necessary. If the
    shape of $X$ is [$r_0$, $r_1$, ..., $r_N$] and the shape of $Y$ is
    [$s_0$, $s_1$, ..., $s_N$], then the shape of the output tensor is
    [$r_{0}s_{0}$, $r_{1}s_{1}$, ..., $r_{N}s_{N}$]. The elements are
    products of elements from $X$ and $Y$.

    The equation is:

    $$
    output[k_{0}, k_{1}, ..., k_{N}] = X[i_{0}, i_{1}, ..., i_{N}] *
    Y[j_{0}, j_{1}, ..., j_{N}]
    $$

    where

    $$
    k_{t} = i_{t} * s_{t} + j_{t}, t = 0, 1, ..., N
    $$

    Args:
        x (Tensor): the first operand of kron op, data type: float16, float32, float64, int32 or int64.
        y (Tensor): the second operand of kron op, data type: float16, float32, float64, int32 or int64. Its data type should be the same as x.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output of kron, data type: float16, float32, float64, int32 or int64. Its data type is the same as x's.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
            >>> y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
            >>> out = paddle.kron(x, y)
            >>> out
            Tensor(shape=[6, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1 , 2 , 3 , 2 , 4 , 6 ],
             [4 , 5 , 6 , 8 , 10, 12],
             [7 , 8 , 9 , 14, 16, 18],
             [3 , 6 , 9 , 4 , 8 , 12],
             [12, 15, 18, 16, 20, 24],
             [21, 24, 27, 28, 32, 36]])
    """
    if in_dynamic_mode():
        return _legacy_C_ops.kron(x, y)
    else:
        helper = LayerHelper('kron', **locals())
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
        )
        check_variable_and_dtype(
            y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
        )

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}
        )
        return out


def cumsum(x, axis=None, dtype=None, name=None):
    """
    The cumulative sum of the elements along a given axis.

    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): The input tensor to compute the cumulative sum over.
        axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
        dtype (str, optional): The data type of the output tensor, can be float16, float32, float64, int32, int64. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the result of cumsum operator.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.arange(12)
            >>> data = paddle.reshape(data, (3, 4))

            >>> y = paddle.cumsum(data)
            >>> y
            Tensor(shape=[12], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0 , 1 , 3 , 6 , 10, 15, 21, 28, 36, 45, 55, 66])

            >>> y = paddle.cumsum(data, axis=0)
            >>> y
            Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0 , 1 , 2 , 3 ],
             [4 , 6 , 8 , 10],
             [12, 15, 18, 21]])

            >>> y = paddle.cumsum(data, axis=-1)
            >>> y
            Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0 , 1 , 3 , 6 ],
             [4 , 9 , 15, 22],
             [8 , 17, 27, 38]])

            >>> y = paddle.cumsum(data, dtype='float64')
            >>> assert y.dtype == paddle.float64
3925 3926 3927 3928 3929 3930
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
Z
zhiboniu 已提交
3931
        x = cast(x, dtype)
3932

3933
    if in_dynamic_mode():
3934 3935
        if axis is None:
            axis = -1
3936
        return _C_ops.cumsum(x, axis, flatten, False, False)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'cumsum',
        )
        check_type(x, 'x', (Variable), 'cumsum')
        locals_var = locals().copy()
        kwargs = {}
        for name, val in locals_var.items():
            if val is not None:
                kwargs[name] = val
        _cum_sum_ = generate_layer_fn('cumsum')
        return _cum_sum_(**kwargs)


@inplace_apis_in_dygraph_only
def cumsum_(x, axis=None, dtype=None, name=None):
    r"""
    Inplace version of ``cumsum`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_cumsum`.
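
    A minimal usage sketch (assuming the in-place API is exposed as a Tensor
    method, like the other ``*_`` ops; dynamic graph mode only):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.arange(4, dtype='float32')
            >>> _ = x.cumsum_()
            >>> # x now holds [0., 1., 3., 6.], the values paddle.cumsum would return.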
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = cast_(x, dtype)

    if in_dynamic_mode():
        if axis is None:
            axis = -1
        return _C_ops.cumsum_(x, axis, flatten, False, False)


def cummax(x, axis=None, dtype='int64', name=None):
    """
    The cumulative max of the elements along a given axis.

    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): The input tensor needed to be cummaxed.
        axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cummax over the flattened array.
        dtype (str, optional): The data type of the indices tensor, can be int32, int64. The default value is int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor), The result of cummax operation. The dtype of cummax result is same with input x.

        indices (Tensor), The corresponding index results of cummax operation.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.to_tensor([-1, 5, 0, -2, -3, 2])
            >>> data = paddle.reshape(data, (2, 3))

            >>> value, indices = paddle.cummax(data)
            >>> value
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [-1,  5,  5,  5,  5,  5])
            >>> indices
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 1, 1, 1, 1, 1])

            >>> value, indices = paddle.cummax(data, axis=0)
            >>> value
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[-1,  5,  0],
             [-1,  5,  2]])
            >>> indices
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0],
             [0, 0, 1]])

            >>> value, indices = paddle.cummax(data, axis=-1)
            >>> value
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[-1,  5,  5],
             [-2, -2,  2]])
            >>> indices
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 1],
             [0, 0, 2]])

            >>> value, indices = paddle.cummax(data, dtype='int64')
            >>> assert indices.dtype == paddle.int64
    """
    if axis is None:
        axis = -1
        x = x.flatten(0, len(x.shape) - 1)

    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'cummax')
    dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dynamic_mode():
        return _C_ops.cummax(x, axis, dtype)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['float32', 'float64', 'int32', 'int64'],
            'cummax',
        )
        check_type(x, 'x', (Variable), 'cummax')
        helper = LayerHelper('cummax', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        indices = helper.create_variable_for_type_inference(dtype='int64')
        helper.append_op(
            type='cummax',
            inputs={'x': x},
            outputs={'out': out, 'indices': indices},
            attrs={'axis': axis, 'dtype': dtype},
        )
        return out, indices


def cummin(x, axis=None, dtype='int64', name=None):
    """
    The cumulative min of the elements along a given axis.

    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): The input tensor needed to be cummined.
        axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cummin over the flattened array.
        dtype (str, optional): The data type of the indices tensor, can be int32, int64. The default value is int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor), The result of cummin operation. The dtype of cummin result is same with input x.

        indices (Tensor), The corresponding index results of cummin operation.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> data = paddle.to_tensor([-1, 5, 0, -2, -3, 2])
            >>> data = paddle.reshape(data, (2, 3))

            >>> value, indices = paddle.cummin(data)
            >>> value
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [-1, -1, -1, -2, -3, -3])
            >>> indices
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 0, 0, 3, 4, 4])

            >>> value, indices = paddle.cummin(data, axis=0)
            >>> value
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[-1,  5,  0],
             [-2, -3,  0]])
            >>> indices
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0],
             [1, 1, 0]])

            >>> value, indices = paddle.cummin(data, axis=-1)
            >>> value
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[-1, -1, -1],
             [-2, -3, -3]])
            >>> indices
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0],
             [0, 1, 1]])

            >>> value, indices = paddle.cummin(data, dtype='int64')
            >>> assert indices.dtype == paddle.int64
    """
    if axis is None:
        axis = -1
        x = x.flatten(0, len(x.shape) - 1)

    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'cummin')
    dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dynamic_mode():
        return _C_ops.cummin(x, axis, dtype)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['float32', 'float64', 'int32', 'int64'],
            'cummin',
        )
        check_type(x, 'x', (Variable), 'cummin')
        helper = LayerHelper('cummin', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        indices = helper.create_variable_for_type_inference(dtype='int64')
        helper.append_op(
            type='cummin',
            inputs={'x': x},
            outputs={'out': out, 'indices': indices},
            attrs={'axis': axis, 'dtype': dtype},
        )
        return out, indices


def logcumsumexp(x, axis=None, dtype=None, name=None):
    r"""
    The logarithm of the cumulative summation of the exponentiation of the elements along a given axis.

    For summation index j given by `axis` and other indices i, the result is

    .. math::

        logcumsumexp(x)_{ij} = \log \sum_{k=0}^{j} \exp(x_{ik})

    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): The input tensor.
        axis (int, optional): The dimension to do the operation along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
        dtype (str, optional): The data type of the output tensor, can be float16, float32, float64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the result of logcumsumexp operator.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.arange(12, dtype='float64')
            >>> data = paddle.reshape(data, (3, 4))

            >>> y = paddle.logcumsumexp(data)
            >>> y
            Tensor(shape=[12], dtype=float64, place=Place(cpu), stop_gradient=True,
            [0.         , 1.31326169 , 2.40760596 , 3.44018970 , 4.45191440 ,
             5.45619332 , 6.45776285 , 7.45833963 , 8.45855173 , 9.45862974 ,
             10.45865844, 11.45866900])

            >>> y = paddle.logcumsumexp(data, axis=0)
            >>> y
            Tensor(shape=[3, 4], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[0.         , 1.         , 2.         , 3.         ],
             [4.01814993 , 5.01814993 , 6.01814993 , 7.01814993 ],
             [8.01847930 , 9.01847930 , 10.01847930, 11.01847930]])

            >>> y = paddle.logcumsumexp(data, axis=-1)
            >>> y
            Tensor(shape=[3, 4], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[0.         , 1.31326169 , 2.40760596 , 3.44018970 ],
             [4.         , 5.31326169 , 6.40760596 , 7.44018970 ],
             [8.         , 9.31326169 , 10.40760596, 11.44018970]])

            >>> y = paddle.logcumsumexp(data, dtype='float64')
            >>> assert y.dtype == paddle.float64
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = cast(x, dtype)

    if in_dynamic_mode():
        if axis is None:
            axis = -1
        return _C_ops.logcumsumexp(x, axis, flatten, False, False)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], "logcumsumexp"
        )

        helper = LayerHelper('logcumsumexp', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logcumsumexp',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'axis': axis, 'flatten': flatten},
        )
        return out


def cumprod(x, dim=None, dtype=None, name=None):
    """
    Compute the cumulative product of the input tensor x along a given dimension dim.

    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): the input tensor needed to be cumproded.
        dim (int, optional): the dimension along which the input tensor will be accumulated. It needs to be in the range of [-x.rank, x.rank),
                    where x.rank means the number of dimensions of the input tensor x and -1 means the last dimension.
        dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64, complex64,
                    complex128. If specified, the input tensor is casted to dtype before the operation is performed.
                    This is useful for preventing data type overflows. The default value is None.
        name (str, optional): Name for the operation (optional, default is None). For more information,
                    please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the result of cumprod operator.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.arange(12)
            >>> data = paddle.reshape(data, (3, 4))
            >>> data
            Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0 , 1 , 2 , 3 ],
             [4 , 5 , 6 , 7 ],
             [8 , 9 , 10, 11]])

            >>> y = paddle.cumprod(data, dim=0)
            >>> y
            Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0  , 1  , 2  , 3  ],
             [0  , 5  , 12 , 21 ],
             [0  , 45 , 120, 231]])

            >>> y = paddle.cumprod(data, dim=-1)
            >>> y
            Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0   , 0   , 0   , 0   ],
             [4   , 20  , 120 , 840 ],
             [8   , 72  , 720 , 7920]])

            >>> y = paddle.cumprod(data, dim=1, dtype='float64')
            >>> y
            Tensor(shape=[3, 4], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[0.   , 0.   , 0.   , 0.   ],
             [4.   , 20.  , 120. , 840. ],
             [8.   , 72.  , 720. , 7920.]])

            >>> assert y.dtype == paddle.float64

    """

    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = cast(x, dtype)

    if in_dynamic_mode():
        return _C_ops.cumprod(x, dim)
    else:
        check_variable_and_dtype(
            x,
            "x",
            [
                'complex64',
                'complex128',
                'float16',
                'uint16',
                'float32',
                'float64',
                'int32',
                'int64',
            ],
            'cumprod',
        )
        check_type(dim, 'dim', int, 'cumprod')

        helper = LayerHelper('cumprod', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='cumprod',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': dim},
        )
        return out


@inplace_apis_in_dygraph_only
def cumprod_(x, dim=None, dtype=None, name=None):
    r"""
    Inplace version of ``cumprod`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_cumprod`.
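
    A minimal sketch of intended usage (Tensor-method form assumed; ``dim`` is
    required, just as for ``cumprod``):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1., 2., 3., 4.])
            >>> _ = x.cumprod_(dim=0)
            >>> # x now holds [1., 2., 6., 24.], the values paddle.cumprod would return.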
    """
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = cast_(x, dtype)

    if in_dynamic_mode():
        return _C_ops.cumprod_(x, dim)


def isfinite(x, name=None):
    """

    Return whether every element of input tensor is finite number or not.

    Args:
        x (Tensor): The input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows whether every element of `x` is a finite number or not.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
            >>> out = paddle.isfinite(x)
            >>> out
            Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, True , True , False, True , False, False])
    """
    if in_dynamic_mode():
        return _C_ops.isfinite(x)
    else:
        helper = LayerHelper("isfinite_v2", **locals())
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'uint16',
            ],
            'isfinite',
        )
        out = helper.create_variable_for_type_inference('bool')
        helper.append_op(
            type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}
        )
        return out


def isinf(x, name=None):
    """

    Return whether every element of input tensor is `+/-INF` or not.

    Args:
        x (Tensor): The input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows whether every element of `x` is `+/-INF` or not.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
            >>> out = paddle.isinf(x)
            >>> out
            Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False, False, True , False, False, False])
    """
    if in_dynamic_mode():
        return _C_ops.isinf(x)
    else:
        helper = LayerHelper("isinf_v2", **locals())
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'uint16',
            ],
            'isinf',
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
        return out


def isnan(x, name=None):
    """

    Return whether every element of input tensor is `NaN` or not.

    Args:
        x (Tensor): The input tensor, its data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows whether every element of `x` is `NaN` or not.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
            >>> out = paddle.isnan(x)
            >>> out
            Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, False, False, False, False, True , True ])
    """
    if in_dynamic_mode():
        return _C_ops.isnan(x)
    else:
        helper = LayerHelper("isnan_v2", **locals())
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'uint16',
            ],
            'isnan',
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
        return out


def prod(x, axis=None, keepdim=False, dtype=None, name=None):
    """
    Compute the product of tensor elements over the given axis.

    Args:
        x (Tensor): The input tensor, its data type should be float32, float64, int32, int64.
        axis (int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
            multiply all elements of `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
            the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
            tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.
        dtype (str|np.dtype, optional): The desired data type of the returned tensor, can be float32, float64,
            int32, int64. If specified, the input tensor is casted to dtype before the operator is performed.
            This is very useful for avoiding data type overflows. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, result of product on the specified dim of input tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # the axis is a int element
            >>> x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
            ...                       [0.1, 0.2, 0.6, 0.7]])
            >>> out1 = paddle.prod(x)
            >>> out1
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            0.00022680)

            >>> out2 = paddle.prod(x, -1)
            >>> out2
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.02700000, 0.00840000])

            >>> out3 = paddle.prod(x, 0)
            >>> out3
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.02000000, 0.06000000, 0.30000001, 0.63000000])

            >>> out4 = paddle.prod(x, 0, keepdim=True)
            >>> out4
            Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.02000000, 0.06000000, 0.30000001, 0.63000000]])

            >>> out5 = paddle.prod(x, 0, dtype='int64')
            >>> out5
            Tensor(shape=[4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 0, 0, 0])

            >>> # the axis is list
            >>> y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
            ...                         [[5.0, 6.0], [7.0, 8.0]]])
            >>> out6 = paddle.prod(y, [0, 1])
            >>> out6
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [105., 384.])

            >>> out7 = paddle.prod(y, (1, 2))
            >>> out7
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [24.  , 1680.])

    """
    if dtype is not None:
        check_dtype(
            dtype,
            'dtype',
            ['float32', 'float64', 'int32', 'int64', "float16", "uint16"],
            'prod',
        )
        if x.dtype != convert_np_dtype_to_dtype_(dtype):
            x = cast(x, dtype)

    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
    if in_dynamic_mode():
        return _C_ops.prod(x, axis, keepdim, reduce_all)
    else:
        helper = LayerHelper('reduce_prod', **locals())
        check_variable_and_dtype(
            x,
            'x/input',
            ['float32', 'float64', 'int32', 'int64', "float16", "uint16"],
            'reduce_prod',
        )
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
        helper.append_op(
            type='reduce_prod',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out


def sign(x, name=None):
    """
    Returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.

    Args:
        x (Tensor): The input tensor. The data type can be float16, float32 or float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
            >>> out = paddle.sign(x=x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 1.,  0., -1.,  1.])
    """
    if in_dynamic_mode():
        return _C_ops.sign(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sign'
        )
        helper = LayerHelper("sign", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})

        return out


def tanh(x, name=None):
    r"""
    Tanh Activation Operator.

    .. math::
        out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}

    Args:
        x (Tensor): Input of Tanh operator, an N-D Tensor, with data type bfloat16, float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Output of Tanh operator, a Tensor with same data type and shape as input.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.tanh(x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.37994900, -0.19737528,  0.09966799,  0.29131261])
    """
    if in_dynamic_mode():
        return _C_ops.tanh(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'tanh'
        )
        check_type(x, 'x', (Variable), 'tanh')
        helper = LayerHelper('tanh', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
        return out


@inplace_apis_in_dygraph_only
def tanh_(x, name=None):
    r"""
    Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_tanh`.
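
    A minimal sketch of intended usage (Tensor-method form assumed; dynamic
    graph mode only):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> _ = x.tanh_()
            >>> # x now holds the values paddle.tanh would return for the same input.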
    """
    return _C_ops.tanh_(x)


def increment(x, value=1.0, name=None):
    """
    The API is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
    Notice that the number of elements in :attr:`x` must be equal to 1.

    Args:
        x (Tensor): A tensor that must always contain only one element, its data type supports float32, float64, int32 and int64.
        value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the elementwise-incremented tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.zeros(shape=[1], dtype='float32')
            >>> counter = paddle.increment(data)
            >>> counter
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.])

    """
    if in_dynamic_mode():
        return _C_ops.increment_(x, value)
    else:
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
        )
        helper = LayerHelper("increment", **locals())
        helper.append_op(
            type='increment',
            inputs={'X': [x]},
            outputs={'Out': [x]},
            attrs={'step': float(value)},
        )
        return x


def all(x, axis=None, keepdim=False, name=None):
    """
    Computes the ``logical and`` of tensor elements over the given dimension.

    Args:
        x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
            :attr:`None`, the ``logical and`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of the ``logical and`` on the specified axis of input Tensor `x`, its data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # x is a bool Tensor with following elements:
            >>> #    [[True, False]
            >>> #     [True, True]]
            >>> x = paddle.to_tensor([[1, 0], [1, 1]], dtype='int32')
            >>> x
            Tensor(shape=[2, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
            [[1, 0],
             [1, 1]])
            >>> x = paddle.cast(x, 'bool')

            >>> # out1 should be False
            >>> out1 = paddle.all(x)
            >>> out1
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)

            >>> # out2 should be [True, False]
            >>> out2 = paddle.all(x, axis=0)
            >>> out2
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False])

            >>> # keepdim=False, out3 should be [False, True], out.shape should be (2,)
            >>> out3 = paddle.all(x, axis=-1)
            >>> out3
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, True ])

            >>> # keepdim=True, out4 should be [[False], [True]], out.shape should be (2, 1)
            >>> out4 = paddle.all(x, axis=1, keepdim=True)
            >>> out4
            Tensor(shape=[2, 1], dtype=bool, place=Place(cpu), stop_gradient=True,
            [[False],
             [True ]])

    """
    if in_dynamic_mode():
        return _C_ops.all(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        attrs = {
            'dim': axis,
            'keep_dim': keepdim,
            'reduce_all': reduce_all,
        }
        check_variable_and_dtype(
            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'all'
        )
        check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')

        helper = LayerHelper('all', **locals())
        out = helper.create_variable_for_type_inference(dtype=paddle.bool)
        helper.append_op(
            type='reduce_all',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out


def any(x, axis=None, keepdim=False, name=None):
    """
    Computes the ``logical or`` of tensor elements over the given dimension, and return the result.

    Args:
        x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
            :attr:`None`, the ``logical or`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of the ``logical or`` on the specified axis of input Tensor `x`, its data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 0], [1, 1]], dtype='int32')
            >>> x = paddle.assign(x)
            >>> x
            Tensor(shape=[2, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
            [[1, 0],
             [1, 1]])
            >>> x = paddle.cast(x, 'bool')
            >>> # x is a bool Tensor with following elements:
            >>> #    [[True, False]
            >>> #     [True, True]]

            >>> # out1 should be True
            >>> out1 = paddle.any(x)
            >>> out1
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            True)

            >>> # out2 should be [True, True]
            >>> out2 = paddle.any(x, axis=0)
            >>> out2
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True, True])

            >>> # keepdim=False, out3 should be [True, True], out.shape should be (2,)
            >>> out3 = paddle.any(x, axis=-1)
            >>> out3
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True, True])

            >>> # keepdim=True, result should be [[True], [True]], out.shape should be (2,1)
            >>> out4 = paddle.any(x, axis=1, keepdim=True)
            >>> out4
            Tensor(shape=[2, 1], dtype=bool, place=Place(cpu), stop_gradient=True,
            [[True],
             [True]])

    """
    if in_dynamic_mode():
        return _C_ops.any(x, axis, keepdim)
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        attrs = {
            'dim': axis,
            'keep_dim': keepdim,
            'reduce_all': reduce_all,
        }
        check_variable_and_dtype(
            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'any'
        )
        check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')

        helper = LayerHelper('any', **locals())
        out = helper.create_variable_for_type_inference(dtype=paddle.bool)
        helper.append_op(
            type='reduce_any',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out


def broadcast_shape(x_shape, y_shape):
    """
    The function returns the shape that results from broadcasting tensors with shapes x_shape and y_shape against each other.

    Note:
        If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x_shape (list[int]|tuple[int]): A shape of tensor.
        y_shape (list[int]|tuple[int]): A shape of tensor.


    Returns:
        list[int], the result shape.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1])
            >>> shape
            [2, 3, 3]

            >>> # shape = paddle.broadcast_shape([2, 1, 3], [3, 3, 1])
            >>> # ValueError (terminated with error message).

    """

    return core.broadcast_shape(x_shape, y_shape)


def conj(x, name=None):
    r"""
    This function computes the conjugate of the Tensor element-wise.

    Args:
        x (Tensor): The input Tensor which holds the complex numbers.
            Optional data types are: float16, complex64, complex128, float32, float64, int32 or int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): The conjugate of input. The shape and data type is the same with input. If the elements of tensor is real type such as float32, float64, int32 or int64, the out is the same with input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]])
            >>> data
            Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(1+1j), (2+2j), (3+3j)],
             [(4+4j), (5+5j), (6+6j)]])

            >>> conj_data = paddle.conj(data)
            >>> conj_data
            Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(1-1j), (2-2j), (3-3j)],
             [(4-4j), (5-5j), (6-6j)]])

    """
    if in_dynamic_mode():
        return _C_ops.conj(x)
    else:
        check_variable_and_dtype(
            x,
            "x",
            [
                'complex64',
                'complex128',
                'float16',
                'uint16',
                'float32',
                'float64',
                'int32',
                'int64',
            ],
            'conj',
        )

        helper = LayerHelper('conj', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )

        helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
        return out


def digamma(x, name=None):
    r"""
    Calculates the digamma of the given input tensor, element-wise.

    .. math::
        Out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the digamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.to_tensor([[1, 1.5], [0, -2.2]], dtype='float32')
            >>> res = paddle.digamma(data)
            >>> res
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[-0.57721591,  0.03648996],
             [ nan       ,  5.32286835]])
    """

    if in_dynamic_mode():
        return _C_ops.digamma(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'digamma'
        )
        helper = LayerHelper('digamma', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
        return out


@inplace_apis_in_dygraph_only
def digamma_(x, name=None):
    r"""
    Inplace version of ``digamma`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_digamma`.
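
    A minimal sketch of intended usage (Tensor-method form assumed; dynamic
    graph mode only):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1., 1.5], [0., -2.2]])
            >>> _ = x.digamma_()
            >>> # x now holds the values paddle.digamma would return for the same input.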
    """
    if in_dynamic_mode():
        return _C_ops.digamma_(x)


def lgamma(x, name=None):
    r"""
    Calculates the lgamma of the given input tensor, element-wise.

    This operator performs elementwise lgamma for input $X$.
    :math:`out = log\Gamma(x)`


    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float16, float32, float64, uint16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.lgamma(x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.31452453, 1.76149762, 2.25271273, 1.09579790])
    """
    if in_dynamic_mode():
        return _C_ops.lgamma(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'lgamma'
        )
        helper = LayerHelper('lgamma', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out})
        return out


@inplace_apis_in_dygraph_only
def lgamma_(x, name=None):
    r"""
    Inplace version of ``lgamma`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_lgamma`.
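
    A minimal sketch of intended usage (Tensor-method form assumed; dynamic
    graph mode only):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> _ = x.lgamma_()
            >>> # x now holds the values paddle.lgamma would return for the same input.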
    """
    if in_dynamic_mode():
        return _C_ops.lgamma_(x)


def neg(x, name=None):
    """
    This function computes the negative of the Tensor element-wise.

    Args:
        x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.neg(x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 0.40000001,  0.20000000, -0.10000000, -0.30000001])
    """

    return scale(
        x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name
    )


@inplace_apis_in_dygraph_only
def neg_(x, name=None):
    r"""
    Inplace version of ``neg`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_neg`.
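
    A minimal sketch of intended usage (Tensor-method form assumed; internally
    this is an in-place scale by -1):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> _ = x.neg_()
            >>> # x now holds [0.4, 0.2, -0.1, -0.3].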
    """
    return x.scale_(
        scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name
    )


def atan2(x, y, name=None):
    r"""
    Element-wise arctangent of x/y with consideration of the quadrant.

    Equation:
        .. math::

            atan2(x,y)=\left\{\begin{matrix}
            & tan^{-1}(\frac{x}{y}) & y > 0 \\
            & tan^{-1}(\frac{x}{y}) + \pi & x>=0, y < 0 \\
            & tan^{-1}(\frac{x}{y}) - \pi & x<0, y < 0 \\
            & +\frac{\pi}{2} & x>0, y = 0 \\
            & -\frac{\pi}{2} & x<0, y = 0 \\
            &\text{undefined} & x=0, y = 0
            \end{matrix}\right.

    Args:
        x (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.
        y (Tensor): An N-D Tensor, must have the same type as `x`.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float64 when the input data type is int).

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')
            >>> x
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-1,  1,  1, -1])

            >>> y = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')
            >>> y
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-1,  -1,  1, 1])

            >>> out = paddle.atan2(x, y)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-2.35619450,  2.35619450,  0.78539819, -0.78539819])

    """

    if in_dynamic_mode():
        return _C_ops.atan2(x, y)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'atan2',
        )
        check_variable_and_dtype(
            y,
            'y',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'atan2',
        )

        helper = LayerHelper('atan2', **locals())
        inputs = {'X1': x, 'X2': y}
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
        return out


def logit(x, eps=None, name=None):
    r"""
    This function generates a new tensor with the logit of the elements of input x. x is clamped to [eps, 1-eps] when eps is not zero. When eps is zero and x < 0 or x > 1, the function yields NaN.

    .. math::

        logit(x) = ln(\frac{x}{1 - x})

    where

    .. math::

        x_i=
            \left\{\begin{array}{rcl}
                x_i & &\text{if } eps == Default \\
                eps & &\text{if } x_i < eps \\
                x_i & &\text{if } eps <= x_i <= 1-eps \\
                1-eps & &\text{if } x_i > 1-eps
            \end{array}\right.

    Args:
        x (Tensor): The input Tensor with data type bfloat16, float16, float32, float64.
        eps (float, optional):  the epsilon for input clamp bound. Default is None.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out(Tensor): A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0.2635, 0.0106, 0.2780, 0.2097, 0.8095])
            >>> out1 = paddle.logit(x)
            >>> out1
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-1.02785587, -4.53624487, -0.95440406, -1.32673466,  1.44676447])

    """
    if eps is None:
        eps = 0.0
    if in_dynamic_mode():
        return _C_ops.logit(x, eps)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'logit'
        )
        helper = LayerHelper("logit", **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logit',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'eps': eps},
        )
        return out


@inplace_apis_in_dygraph_only
def logit_(x, eps=None, name=None):
    r"""
    Inplace version of ``logit`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logit`.
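
    A minimal sketch of intended usage (Tensor-method form assumed; ``eps``
    defaults to 0.0, as in ``logit``):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0.2635, 0.0106, 0.2780, 0.2097, 0.8095])
            >>> _ = x.logit_()
            >>> # x now holds the values paddle.logit would return for the same input.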
    """
    if eps is None:
        eps = 0.0
    if in_dynamic_mode():
        return _C_ops.logit_(x, eps)


def lerp(x, y, weight, name=None):
    r"""
    Does a linear interpolation between x and y based on weight.

    Equation:
        .. math::

            lerp(x, y, weight) = x + weight * (y - x).

    Args:
        x (Tensor): An N-D Tensor with starting points, the data type is bfloat16, float16, float32, float64.
        y (Tensor): An N-D Tensor with ending points, the data type is bfloat16, float16, float32, float64.
        weight (float|Tensor): The weight for the interpolation formula. When weight is Tensor, the data type is bfloat16, float16, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input.

    Example:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.arange(1., 5., dtype='float32')
            >>> y = paddle.empty([4], dtype='float32')
            >>> y.fill_(10.)
            >>> out = paddle.lerp(x, y, 0.5)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [5.50000000, 6.        , 6.50000000, 7.        ])

    """
    if isinstance(weight, float):
        weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype)

    if in_dynamic_mode():
        return _C_ops.lerp(x, y, weight)
    else:
        check_variable_and_dtype(
            x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'lerp'
        )
        check_variable_and_dtype(
            y, 'y', ['uint16', 'float16', 'float32', 'float64'], 'lerp'
        )
        check_variable_and_dtype(
            weight,
            'weight',
            ['uint16', 'float16', 'float32', 'float64'],
            'lerp',
        )

        helper = LayerHelper('lerp', **locals())
        inputs = {'X': x, 'Y': y, 'Weight': weight}
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
        return out


@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
    r"""
    Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_lerp`.
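
    A minimal sketch of intended usage (Tensor-method form assumed; ``y`` and a
    Tensor ``weight`` must broadcast to ``x``'s shape):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.arange(1., 5., dtype='float32')
            >>> y = paddle.full([4], 10., dtype='float32')
            >>> _ = x.lerp_(y, 0.5)
            >>> # x now holds [5.5, 6., 6.5, 7.], the values paddle.lerp would return.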
    """
    out_shape = broadcast_shape(x.shape, y.shape)
    check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
    if isinstance(weight, float):
        weight = paddle.to_tensor([weight], dtype=x.dtype)
    elif isinstance(weight, (paddle.Tensor, Variable)):
        out_shape = broadcast_shape(out_shape, weight.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    return _C_ops.lerp_(x, y, weight)


def erfinv(x, name=None):
    r"""
    The inverse error function of x. Please refer to :ref:`api_paddle_erf`.

        .. math::

            erfinv(erf(x)) = x.

    Args:
        x (Tensor): An N-D Tensor, the data type is float16, bfloat16, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor), an N-D Tensor, whose shape and data type are the same as the input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0, 0.5, -1.], dtype="float32")
            >>> out = paddle.erfinv(x)
            >>> out
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 0.       , 0.47693631, -inf.     ])

    """
    if in_dynamic_mode():
        return _C_ops.erfinv(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'float16', 'uint16'], 'erfinv'
        )
        helper = LayerHelper('erfinv', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
        return out


@inplace_apis_in_dygraph_only
def erfinv_(x, name=None):
    r"""
    Inplace version of ``erfinv`` API, the result is written back into the input ``x``.
    Please refer to :ref:`api_paddle_erfinv`.
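
    A minimal usage sketch (dygraph mode only, like all inplace APIs):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0., 0.5, -1.], dtype="float32")
            >>> out = x.erfinv_()  # ``x`` now holds erfinv of its original values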
    """
    check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
    return _C_ops.erfinv_(x)


def rad2deg(x, name=None):
    r"""
    Convert each of the elements of input x from angles in radians to degrees.

    Equation:
        .. math::

            rad2deg(x)=180/ \pi * x

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, whose shape and data type are the same as the input (the output data type is float32 when the input data type is int).

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import math

            >>> x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
            >>> result1 = paddle.rad2deg(x1)
            >>> result1
            Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
            [ 180.02334595, -180.02334595,  359.98937988, -359.98937988,
              89.95437622 , -89.95437622 ])

            >>> x2 = paddle.to_tensor(math.pi/2)
            >>> result2 = paddle.rad2deg(x2)
            >>> result2
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            90.)

            >>> x3 = paddle.to_tensor(1)
            >>> result3 = paddle.rad2deg(x3)
            >>> result3
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            57.29578018)
    """
    rad2deg_scale = 180 / np.pi
    if in_dynamic_mode():
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            x = cast(x, dtype="float32")
        return _C_ops.scale(x, rad2deg_scale, 0.0, True)
    else:
        check_variable_and_dtype(
            x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg'
        )
        helper = LayerHelper('rad2deg', **locals())
        out_cast = x
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            out_cast = helper.create_variable_for_type_inference(
                dtype=paddle.float32
            )
            helper.append_op(
                type='cast',
                inputs={'X': x},
                outputs={'Out': out_cast},
                attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32},
            )
        out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
        helper.append_op(
            type='scale',
            inputs={'X': out_cast},
            outputs={'Out': out},
            attrs={'scale': rad2deg_scale},
        )
        return out


def deg2rad(x, name=None):
    r"""
    Convert each of the elements of input x from degrees to angles in radians.

        .. math::

            deg2rad(x)=\pi * x / 180

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, whose shape and data type are the same as the input (the output data type is float32 when the input data type is int).

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0])
            >>> result1 = paddle.deg2rad(x1)
            >>> result1
            Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
            [3.14159274, -3.14159274,  6.28318548, -6.28318548,  1.57079637,
            -1.57079637])

            >>> x2 = paddle.to_tensor(180)
            >>> result2 = paddle.deg2rad(x2)
            >>> result2
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            3.14159274)
    """
    deg2rad_scale = np.pi / 180.0
    if in_dynamic_mode():
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            x = cast(x, dtype="float32")
        return _C_ops.scale(x, deg2rad_scale, 0.0, True)
    else:
        check_variable_and_dtype(
            x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad'
        )
        helper = LayerHelper('deg2rad', **locals())
        out_cast = x
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            out_cast = helper.create_variable_for_type_inference(
                dtype=paddle.float32
            )
            helper.append_op(
                type='cast',
                inputs={'X': x},
                outputs={'Out': out_cast},
                attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32},
            )
        out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
        helper.append_op(
            type='scale',
            inputs={'X': out_cast},
            outputs={'Out': out},
            attrs={'scale': deg2rad_scale},
        )
        return out


def gcd(x, y, name=None):
    """
    Computes the element-wise greatest common divisor (GCD) of input |x| and |y|.
    Both x and y must have integer types.

    Note:
        gcd(0,0)=0, gcd(0, y)=|y|

        If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).

    Args:
        x (Tensor): An N-D Tensor, the data type is int32, int64.
        y (Tensor): An N-D Tensor, the data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, whose data type is the same as the input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x1 = paddle.to_tensor(12)
            >>> x2 = paddle.to_tensor(20)
            >>> paddle.gcd(x1, x2)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            4)

            >>> x3 = paddle.arange(6)
            >>> paddle.gcd(x3, x2)
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [20, 1 , 2 , 1 , 4 , 5])

            >>> x4 = paddle.to_tensor(0)
            >>> paddle.gcd(x4, x2)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            20)

            >>> paddle.gcd(x4, x4)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            0)

            >>> x5 = paddle.to_tensor(-20)
            >>> paddle.gcd(x1, x5)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            4)
    """
    shape = paddle.broadcast_shape(x.shape, y.shape)
    x = paddle.broadcast_to(x, shape)
    y = paddle.broadcast_to(y, shape)
    x = paddle.abs(x)
    y = paddle.abs(y)
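    # Vectorized Euclidean algorithm: repeatedly map (x, y) -> (y, x mod y)
    # elementwise until every element of y is zero; x then holds the GCD.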

    def _gcd_cond_fn(x, y):
        return paddle.any(y != 0)

    def _gcd_body_fn(x, y):
        # paddle.mod will raise an error when any element of y is 0. To avoid
        # that, we change those zeros to ones. Their values don't matter because
        # they won't be used.
        y_not_equal_0 = y != 0
        y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype))
        x, y = (
            paddle.where(y_not_equal_0, y, x),
            paddle.where(
                y_not_equal_0,
                paddle.mod(x, y_safe),
                paddle.zeros(y.shape, y.dtype),
            ),
        )
        return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

    if in_dynamic_mode():
        while _gcd_cond_fn(x, y):
            x, y = _gcd_body_fn(x, y)

        return x
    else:
        check_variable_and_dtype(x, 'x', ['int32', 'int64'], 'gcd')
        check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'gcd')
        out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y])
        return out


def gcd_(x, y, name=None):
    r"""
    Inplace version of ``gcd`` API, the result is written back into the input ``x``.
    Please refer to :ref:`api_paddle_gcd`.
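
    A minimal usage sketch (dygraph mode only; ``y`` must broadcast to
    ``x.shape``):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([12, 20])
            >>> y = paddle.to_tensor([20, 12])
            >>> out = x.gcd_(y)  # ``x`` becomes [4, 4]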
    """
    shape = paddle.broadcast_shape(x.shape, y.shape)
    if shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                shape, x.shape
            )
        )
    y = paddle.broadcast_to(y, shape)
    x = paddle.abs_(x)
    y = paddle.abs(y)

    def _gcd_cond_fn(x, y):
        return paddle.any(y != 0)

    def _gcd_body_fn(x, y):
        # paddle.mod will raise an error when any element of y is 0. To avoid
        # that, we change those zeros to ones. Their values don't matter because
        # they won't be used.
        y_equal_0 = y == 0
        y_safe = paddle.where(y_equal_0, paddle.ones(y.shape, y.dtype), y)
        y, x = (
            paddle.where(
                y_equal_0,
                paddle.zeros(y.shape, y.dtype),
                paddle.mod(x, y_safe),
            ),
            paddle.where_(y_equal_0, x, y),
        )
        return (
            paddle.where(x < y, x, y),
            paddle.where_(x >= y, x, y),
        )

    if in_dynamic_mode():
        while _gcd_cond_fn(x, y):
            y, x = _gcd_body_fn(x, y)

        return x


def lcm(x, y, name=None):
    """
    Computes the element-wise least common multiple (LCM) of input |x| and |y|.
    Both x and y must have integer types.

    Note:
        lcm(0,0)=0, lcm(0, y)=0

        If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).

    Args:
        x (Tensor): An N-D Tensor, the data type is int32, int64.
        y (Tensor): An N-D Tensor, the data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, whose data type is the same as the input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x1 = paddle.to_tensor(12)
            >>> x2 = paddle.to_tensor(20)
            >>> paddle.lcm(x1, x2)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            60)

            >>> x3 = paddle.arange(6)
            >>> paddle.lcm(x3, x2)
            Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 20, 20, 60, 20, 20])

            >>> x4 = paddle.to_tensor(0)
            >>> paddle.lcm(x4, x2)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            0)

            >>> paddle.lcm(x4, x4)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            0)

            >>> x5 = paddle.to_tensor(-20)
            >>> paddle.lcm(x1, x5)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            60)
    """
    d = paddle.gcd(x, y)
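    # lcm(x, y) = |x * y| / gcd(x, y), with the gcd(x, y) == 0 case mapped to 0.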
    # paddle.mod will raise an error when any element of y is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    d_equal_0 = paddle.equal(d, 0)
    d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d)
    out = paddle.where(
        d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe
    )
    return out


def lcm_(x, y, name=None):
    r"""
    Inplace version of ``lcm`` API, the result is written back into the input ``x``.
    Please refer to :ref:`api_paddle_lcm`.
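
    A minimal usage sketch (dygraph mode only; note that the intermediate
    inplace ops below also modify ``x``):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([12, 20])
            >>> y = paddle.to_tensor([20, 12])
            >>> out = x.lcm_(y)  # ``x`` becomes [60, 60]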
    """
    d = paddle.gcd(x, y)
    # paddle.mod will raise an error when any element of y is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    d_not_equal_0 = d != 0
    d_safe = paddle.where(d_not_equal_0, d, paddle.ones(d.shape, d.dtype))
    out = paddle.where_(
        d_not_equal_0,
        paddle.abs_(x.multiply_(y)).floor_divide_(d_safe),
        paddle.zeros(d.shape, d.dtype),
    )
    return out


def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
    r"""
    Computes the n-th forward difference along the given axis.
    The first-order difference is computed using the following formula:

    .. math::

        out[i] = x[i+1] - x[i]

    Higher-order differences are computed by applying paddle.diff() recursively.
    Any positive integer value of n is supported.

    Args:
        x (Tensor): The input tensor to compute the forward difference on, the data type is float16, float32, float64, bool, int32, int64.
        n (int, optional): The number of times to recursively compute the difference.
                            Supports any positive integer value. Default: 1.
        axis (int, optional): The axis to compute the difference along. Default: -1.
        prepend (Tensor, optional): The tensor to prepend to input along axis before computing the difference.
                                   Its dimensions must be equivalent to those of x,
                                   and its shape must match x's shape except along axis.
        append (Tensor, optional): The tensor to append to input along axis before computing the difference,
                                   Its dimensions must be equivalent to those of x,
                                   and its shape must match x's shape except along axis.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

A
andyjpaddle 已提交
5732 5733 5734 5735 5736 5737
    Returns:
        Tensor: The output tensor with the same dtype as x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 4, 5, 2])
            >>> out = paddle.diff(x)
            >>> out
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [ 3,  1, -3])

            >>> x_2 = paddle.to_tensor([1, 4, 5, 2])
            >>> out = paddle.diff(x_2, n=2)
            >>> out
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [ -2,  -4])

            >>> y = paddle.to_tensor([7, 9])
            >>> out = paddle.diff(x, append=y)
            >>> out
            Tensor(shape=[5], dtype=int64, place=Place(cpu), stop_gradient=True,
            [ 3,  1, -3,  5,  2])

            >>> z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
            >>> out = paddle.diff(z, axis=0)
            >>> out
            Tensor(shape=[1, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[3, 3, 3]])
            >>> out = paddle.diff(z, axis=1)
            >>> out
            Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 1],
             [1, 1]])
    """
    if n < 1:
        raise ValueError(
            "diff expects n to be a positive integer, but got {}".format(n)
        )

    def _diff_handler(x, n=1, axis=-1, prepend=None, append=None, name=None):
        if axis < 0:
            axis = axis + len(x.shape)
        if axis > len(x.shape):
            axis = len(x.shape)
        if axis < 0:
            axis = 0
        dtype = x.dtype
        axes = [axis]
        infer_flags = [1 for i in range(len(axes))]
        if in_dynamic_mode():
            has_pend = False
            input_list = []
            if prepend is not None and append is not None:
                input_list = [prepend, x, append]
                has_pend = True
            elif prepend is not None:
                input_list = [prepend, x]
                has_pend = True
            elif append is not None:
                input_list = [x, append]
                has_pend = True
            if has_pend:
                new_input = _C_ops.concat(input_list, axis)
            else:
                new_input = x

            dim_len = new_input.shape[axis]

            # slice new_input[..., :-1] and new_input[..., 1:] along ``axis``
            starts_1 = [0]
            ends_1 = [dim_len - 1]
            input_front = _C_ops.slice(
                new_input, axes, starts_1, ends_1, infer_flags, []
            )
            starts_2 = [1]
            ends_2 = [dim_len]
            input_back = _C_ops.slice(
                new_input, axes, starts_2, ends_2, infer_flags, []
            )

            if x.dtype == paddle.bool:
                return _C_ops.logical_xor(input_back, input_front)
            else:
                return _C_ops.subtract(input_back, input_front)
        else:
            check_variable_and_dtype(
                x,
                'x',
                ['float16', 'float32', 'float64', 'bool', 'int32', 'int64'],
                'diff',
            )
            check_type(axis, 'axis', (int), 'diff')
            helper = LayerHelper('diff', **locals())
            has_pend = False
            input_list = []
            if prepend is not None and append is not None:
                input_list = [prepend, x, append]
                has_pend = True
            elif prepend is not None:
                input_list = [prepend, x]
                has_pend = True
            elif append is not None:
                input_list = [x, append]
                has_pend = True

            if has_pend:
                new_input = helper.create_variable_for_type_inference(dtype)
                helper.append_op(
                    type='concat',
                    inputs={'X': input_list},
                    outputs={'Out': [new_input]},
                    attrs={'axis': axis},
                )
            else:
                new_input = x

            dim_len = new_input.shape[axis]
            attrs_1 = {'axes': axes}
            starts_1 = [0]
            ends_1 = [dim_len - 1]
            attrs_1['starts'] = starts_1
            attrs_1['ends'] = ends_1
            input_front = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='slice',
                inputs={'Input': new_input},
                attrs=attrs_1,
                outputs={'Out': input_front},
            )
            attrs_2 = {'axes': axes}
            starts_2 = [1]
            ends_2 = [dim_len]
            attrs_2['starts'] = starts_2
            attrs_2['ends'] = ends_2
            input_back = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='slice',
                inputs={'Input': new_input},
                attrs=attrs_2,
                outputs={'Out': input_back},
            )

            if dtype == paddle.bool:
                out = helper.create_variable_for_type_inference(dtype)
                helper.append_op(
                    type='logical_xor',
                    inputs={"X": input_back, "Y": input_front},
                    outputs={"Out": out},
                )
            else:
                out = paddle.tensor.math.subtract(input_back, input_front)
            return out

    out = _diff_handler(
        x, n=1, axis=axis, prepend=prepend, append=append, name=name
    )
    if n > 1:
        for _ in range(n - 1):
            # prepend and append were already concatenated in the first pass,
            # so the higher-order passes must not re-apply them.
            out = _diff_handler(out, n=1, axis=axis, name=name)
    return out


def angle(x, name=None):
    r"""
    Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
    for negative real numbers, the angle is :math:`\pi`.

    Equation:
        .. math::

            angle(x)=arctan2(x.imag, x.real)

    Args:
        x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 .
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An N-D Tensor of real data type with the same precision as that of x's data type.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
            >>> y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
            >>> z = x + 1j * y
            >>> z
            Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(-2-2j), (-2-1j), (-2+0j), (-2+1j)],
             [(-1-2j), (-1-1j), (-1+0j), (-1+1j)],
             [-2j    , -1j    ,  0j    ,  1j    ],
             [ (1-2j),  (1-1j),  (1+0j),  (1+1j)]])

            >>> theta = paddle.angle(z)
            >>> theta
            Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[-2.35619450, -2.67794514,  3.14159274,  2.67794514],
             [-2.03444386, -2.35619450,  3.14159274,  2.35619450],
             [-1.57079637, -1.57079637,  0.        ,  1.57079637],
             [-1.10714877, -0.78539819,  0.        ,  0.78539819]])
    """

    if in_dynamic_mode():
        return _C_ops.angle(x)
    else:
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'complex64',
                'complex128',
                'uint16',
            ],
            'angle',
        )
        op_type = "angle"
        helper = LayerHelper(op_type, **locals())
        inputs = {"X": x}
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(x.dtype)
        )
        outputs = {"Out": out}
        helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
        return out


def heaviside(x, y, name=None):
    r"""
    Computes the Heaviside step function determined by the corresponding element in y for each element in x. The equation is

    .. math::
        heaviside(x, y)=
            \left\{
                \begin{array}{lcl}
                0,& &\text{if} \ x < 0, \\
                y,& &\text{if} \ x = 0, \\
                1,& &\text{if} \ x > 0.
                \end{array}
            \right.

    Note:
        ``paddle.heaviside`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): The input tensor of Heaviside step function, it's data type should be float16, float32, float64, int32 or int64.
        y (Tensor): The tensor that determines a Heaviside step function, it's data type should be float16, float32, float64, int32 or int64.
        name (str, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x and y have different shapes and are broadcastable, the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([-0.5, 0, 0.5])
            >>> y = paddle.to_tensor([0.1])
            >>> paddle.heaviside(x, y)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.        , 0.10000000, 1.        ])
            >>> x = paddle.to_tensor([[-0.5, 0, 0.5], [-0.5, 0.5, 0]])
            >>> y = paddle.to_tensor([0.1, 0.2, 0.3])
            >>> paddle.heaviside(x, y)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.        , 0.20000000, 1.        ],
             [0.        , 1.        , 0.30000001]])
    """
    if in_dynamic_mode():
        return _C_ops.heaviside(x, y)
    else:
        op_type = 'elementwise_heaviside'
        return _elementwise_op(LayerHelper(op_type, **locals()))


def frac(x, name=None):
    """
    This API is used to return the fractional portion of each element in input.

    Args:
        x (Tensor): The input tensor, which data type should be int32, int64, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor of frac.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input = paddle.to_tensor([[12.22000003, -1.02999997],
            ...                           [-0.54999995, 0.66000003]])
            >>> output = paddle.frac(input)
            >>> output
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[ 0.22000003, -0.02999997],
             [-0.54999995,  0.66000003]])
    """
    if x.dtype not in [
        paddle.int32,
        paddle.int64,
        paddle.float32,
        paddle.float64,
    ]:
        raise TypeError(
            "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format(
                x.dtype
            )
        )
    if in_dynamic_mode():
        y = _C_ops.trunc(x)
        return _C_ops.subtract(x, y)
    else:
        inputs = {"X": x}
        attrs = {}

        helper = LayerHelper("trunc", **locals())
        check_variable_and_dtype(
            x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc'
        )
        y = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
        )
        return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


@inplace_apis_in_dygraph_only
def frac_(x, name=None):
    r"""
    Inplace version of ``frac`` API, the result is written back into the input ``x``.
    Please refer to :ref:`api_paddle_frac`.
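
    A minimal usage sketch (dygraph mode only, like all inplace APIs):

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([12.22, -1.03])
            >>> out = x.frac_()  # ``x`` becomes approximately [0.22, -0.03]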
    """

    if x.dtype not in [
        paddle.int32,
        paddle.int64,
        paddle.float32,
        paddle.float64,
    ]:
        raise TypeError(
            "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format(
                x.dtype
            )
        )
    if in_dynamic_mode():
        y = _C_ops.trunc(x)
        return _C_ops.subtract_(x, y)


def sgn(x, name=None):
    """
    For a complex tensor, this API returns a new tensor whose elements have the same angles as the corresponding
    elements of the input and absolute values of one.
    For a tensor of any other float dtype,
    this API returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero, the same as paddle.sign.

    Args:
        x (Tensor): The input tensor, which data type should be float16, float32, float64, complex64, complex128.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A sign Tensor for real input, or normalized Tensor for complex input, shape and data type are same as input.

    Examples:
6116
        .. code-block:: python
6117

6118
            >>> import paddle
6119

6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130
            >>> x = paddle.to_tensor([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]])
            >>> paddle.sgn(x)
            Tensor(shape=[2, 4], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[ (0.6000000238418579+0.800000011920929j),
              (0.2800000011920929-0.9599999785423279j),
               0j                                     ,
              (0.4472135901451111+0.8944271802902222j)],
             [ (0.6000000238418579+0.800000011920929j),
               (1+0j)                                 ,
               0j                                     ,
              (-1+0j)                                 ]])

    """
    if x.dtype not in [
        paddle.float16,
        paddle.float32,
        paddle.float64,
        paddle.complex64,
        paddle.complex128,
    ]:
        raise TypeError(
            "The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}".format(
                x.dtype
            )
        )
    if paddle.is_complex(x):
        expand_x = paddle.as_real(x)
        x_abs = paddle.abs(x)
        x_abs = paddle.unsqueeze(x_abs, axis=-1)
        output = expand_x / x_abs
        zeros = paddle.zeros_like(output)
        output = paddle.where(paddle.isnan(output), zeros, output)

        return paddle.as_complex(output)
    else:
        return paddle.sign(x)


def take(x, index, mode='raise', name=None):
    """
    Returns a new tensor with the elements of input tensor x at the given index.
    The input tensor is treated as if it were viewed as a 1-D tensor.
    The result takes the same shape as the index.

    Args:
        x (Tensor): An N-D Tensor, its data type should be int32, int64, float32, float64.
        index (Tensor): An N-D Tensor, its data type should be int32, int64.
        mode (str, optional): Specifies how out-of-bounds indices behave. The candidates are ``'raise'``, ``'wrap'`` and ``'clip'``.

            - ``'raise'``: raise an error (default);
            - ``'wrap'``: wrap around;
            - ``'clip'``: clip to the range. ``'clip'`` mode means that all indices that are too large are replaced by the index that addresses the last element. Note that this disables indexing with negative numbers.

        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, Tensor with the same shape as index, the data type is the same with input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x_int = paddle.arange(0, 12).reshape([3, 4])
            >>> x_float = x_int.astype(paddle.float64)

            >>> idx_pos = paddle.arange(4, 10).reshape([2, 3])  # positive index
            >>> idx_neg = paddle.arange(-2, 4).reshape([2, 3])  # negative index
            >>> idx_err = paddle.arange(-2, 13).reshape([3, 5])  # index out of range

            >>> paddle.take(x_int, idx_pos)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[4, 5, 6],
             [7, 8, 9]])

            >>> paddle.take(x_int, idx_neg)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[10, 11, 0 ],
             [1 , 2 , 3 ]])

            >>> paddle.take(x_float, idx_pos)
            Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[4., 5., 6.],
             [7., 8., 9.]])

            >>> x_int.take(idx_pos)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[4, 5, 6],
             [7, 8, 9]])

            >>> paddle.take(x_int, idx_err, mode='wrap')
            Tensor(shape=[3, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[10, 11, 0 , 1 , 2 ],
             [3 , 4 , 5 , 6 , 7 ],
             [8 , 9 , 10, 11, 0 ]])

            >>> paddle.take(x_int, idx_err, mode='clip')
            Tensor(shape=[3, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0 , 0 , 0 , 1 , 2 ],
             [3 , 4 , 5 , 6 , 7 ],
             [8 , 9 , 10, 11, 11]])

    """
    if mode not in ['raise', 'wrap', 'clip']:
        raise ValueError(
            "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format(
                mode
            )
        )

    if in_dynamic_mode():
        if not isinstance(index, (paddle.Tensor, Variable)):
            raise TypeError(
                "The type of 'index' must be Tensor, but got {}".format(
                    type(index)
                )
            )
        if index.dtype not in [paddle.int32, paddle.int64]:
            raise TypeError(
                "The data type of 'index' must be one of ['int32', 'int64'], but got {}".format(
                    index.dtype
                )
            )

    else:
        check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take')

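    # Treat ``x`` as a 1-D tensor: map indices into [0, numel) according to
    # ``mode``, gather with index_select, then restore the shape of ``index``.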
    input_1d = x.flatten()
    index_1d = index.flatten()
    max_index = input_1d.shape[-1]

    if mode == 'raise':
        # This processing enables 'take' to handle negative indexes within the correct range.
        index_1d = paddle.where(index_1d < 0, index_1d + max_index, index_1d)
    elif mode == 'wrap':
        # The out of range indices are constrained by taking the remainder.
        index_1d = paddle.where(index_1d < 0, index_1d % max_index, index_1d)
        index_1d = paddle.where(
            index_1d >= max_index, index_1d % max_index, index_1d
        )
    elif mode == 'clip':
        # 'clip' mode disables indexing with negative numbers.
        index_1d = clip(index_1d, 0, max_index - 1)

    out = input_1d.index_select(index_1d).reshape(index.shape)

    return out


def frexp(x, name=None):
    """
    The function used to decompose a floating point number into mantissa and exponent.
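
    The returned values satisfy ``x = mantissa * 2 ** exponent``, with
    ``0.5 <= |mantissa| < 1`` for nonzero finite inputs.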

    Args:
        x (Tensor): The input tensor, it's data type should be float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:

        - mantissa (Tensor), A mantissa Tensor. The shape and data type of mantissa tensor and exponential tensor are
            the same as those of input.

        - exponent (Tensor), A exponent Tensor. The shape and data type of mantissa tensor and exponential tensor are
            the same as those of input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[1, 2, 3, 4]], dtype="float32")
            >>> mantissa, exponent = paddle.tensor.math.frexp(x)
            >>> mantissa
            Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.50000000, 0.50000000, 0.75000000, 0.50000000]])
            >>> exponent
            Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1., 2., 2., 3.]])
    """
    if x.dtype not in [paddle.float32, paddle.float64]:
        raise TypeError(
            "The data type of input must be one of ['float32', 'float64'], but got {}".format(
                x.dtype
            )
        )
    input_x = paddle.abs(x)
    exponent = paddle.floor(paddle.log2(input_x))
    exponent = paddle.where(
        paddle.isinf(exponent), paddle.full_like(exponent, 0), exponent
    )

    # the log2(0) = -inf exponents were zero-filled above; mantissa = |x| / 2**exponent
    mantissa = paddle.divide(input_x, 2**exponent)
    # bump the exponent and halve the mantissa where the mantissa is still >= 1
    exponent = paddle.where(
        (mantissa >= 1),
        paddle.add(exponent, paddle.ones_like(exponent)),
        exponent,
    )
    mantissa = paddle.where(
        (mantissa >= 1),
        paddle.divide(mantissa, 2 ** paddle.ones_like(exponent)),
        mantissa,
    )

    mantissa = paddle.where((x < 0), mantissa * -1, mantissa)
    return mantissa, exponent


def _trapezoid(y, x=None, dx=None, axis=-1, mode='sum'):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Args:
        y (Tensor): Input tensor to integrate. It's data type should be float16, float32, float64.
        x (Tensor, optional): The sample points corresponding to the :attr:`y` values, the same type as :attr:`y`.
            It is known that the size of :attr:`y` is `[d_1, d_2, ... , d_n]` and :math:`axis=k`, then the size of :attr:`x` can only be `[d_k]` or `[d_1, d_2, ... , d_n ]`.
            If :attr:`x` is None, the sample points are assumed to be evenly spaced :attr:`dx` apart. The default is None.
        dx (float, optional): The spacing between sample points when :attr:`x` is None. If neither :attr:`x` nor :attr:`dx` is provided then the default is :math:`dx = 1`.
        axis (int, optional): The axis along which to integrate. The default is -1.
        mode (str): The summation mode to use, 'sum' or 'cumsum'. The default is 'sum'.

    Returns:
        Tensor, Definite integral of :attr:`y` is N-D tensor as approximated along a single axis by the trapezoidal rule.
    """
    if mode == 'sum':
        sum_mode = paddle.sum
    elif mode == 'cumsum':
        sum_mode = paddle.cumsum
    else:
        raise ValueError(
            "mode should be 'sum' or 'cumsum', but got {}".format(mode)
        )

    if not (x is None or dx is None):
        raise ValueError("Not permitted to specify both x and dx input args.")
    if y.dtype not in [paddle.float16, paddle.float32, paddle.float64]:
        raise TypeError(
            "The data type of input must be Tensor, and dtype should be one of ['paddle.float16', 'paddle.float32', 'paddle.float64'], but got {}".format(
                y.dtype
            )
        )

    y_shape = y.shape
    length = y_shape[axis]
    if axis < 0:
        axis += y.dim()
    if x is None:
        if dx is None:
            dx = 1.0
        dx = paddle.to_tensor(dx)
        if dx.dim() > 1:
            raise ValueError(f'Expected dx to be a scalar, got dx={dx}')
    else:
        if x.dtype not in [paddle.float16, paddle.float32, paddle.float64]:
            raise TypeError(
                "The data type of input must be Tensor, and dtype should be one of ['paddle.float16', 'paddle.float32', 'paddle.float64'], but got {}".format(
                    x.dtype
                )
            )
        # Reshape to correct shape
        if x.dim() == 1:
            dx = paddle.diff(x)
            shape = [1] * y.dim()
            shape[axis] = dx.shape[0]
            dx = dx.reshape(shape)
        else:
            dx = paddle.diff(x, axis=axis)
    return 0.5 * sum_mode(
        (
            paddle.gather(y, paddle.arange(1, length), axis=axis)
            + paddle.gather(y, paddle.arange(0, length - 1), axis=axis)
        )
        * dx,
        axis=axis,
    )


def trapezoid(y, x=None, dx=None, axis=-1, name=None):
    """
    Integrate along the given axis using the composite trapezoidal rule. Use the sum method.
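
    A sketch of the rule being applied, for samples ``y_i`` taken at points ``x_i``:
    the integral is approximated by ``sum_i (y_i + y_{i+1}) / 2 * (x_{i+1} - x_i)``.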

    Args:
        y (Tensor): Input tensor to integrate. It's data type should be float16, float32, float64.
        x (Tensor, optional): The sample points corresponding to the :attr:`y` values, the same type as :attr:`y`.
            It is known that the size of :attr:`y` is `[d_1, d_2, ... , d_n]` and :math:`axis=k`, then the size of :attr:`x` can only be `[d_k]` or `[d_1, d_2, ... , d_n ]`.
            If :attr:`x` is None, the sample points are assumed to be evenly spaced :attr:`dx` apart. The default is None.
        dx (float, optional): The spacing between sample points when :attr:`x` is None. If neither :attr:`x` nor :attr:`dx` is provided then the default is :math:`dx = 1`.
        axis (int, optional): The axis along which to integrate. The default is -1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, Definite integral of :attr:`y` is N-D tensor as approximated along a single axis by the trapezoidal rule.
        If :attr:`y` is a 1D tensor, then the result is a float. If N is greater than 1, then the result is an (N-1)-D tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> y = paddle.to_tensor([4, 5, 6], dtype='float32')

            >>> paddle.trapezoid(y)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            10.)

            >>> paddle.trapezoid(y, dx=2.)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            20.)

            >>> y = paddle.to_tensor([4, 5, 6], dtype='float32')
            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')

            >>> paddle.trapezoid(y, x)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            10.)

            >>> y = paddle.to_tensor([1, 2, 3], dtype='float64')
            >>> x = paddle.to_tensor([8, 6, 4], dtype='float64')

            >>> paddle.trapezoid(y, x)
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
            -8.)

            >>> y = paddle.arange(6).reshape((2, 3)).astype('float32')

            >>> paddle.trapezoid(y, axis=0)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.50000000, 2.50000000, 3.50000000])
            >>> paddle.trapezoid(y, axis=1)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [2., 8.])
    """
    return _trapezoid(y, x, dx, axis, mode='sum')


def cumulative_trapezoid(y, x=None, dx=None, axis=-1, name=None):
    """
    Integrate along the given axis using the composite trapezoidal rule. Use the cumsum method

    Args:
        y (Tensor): Input tensor to integrate. It's data type should be float16, float32, float64.
        x (Tensor, optional): The sample points corresponding to the :attr:`y` values, the same type as :attr:`y`.
            It is known that the size of :attr:`y` is `[d_1, d_2, ... , d_n]` and :math:`axis=k`, then the size of :attr:`x` can only be `[d_k]` or `[d_1, d_2, ... , d_n ]`.
            If :attr:`x` is None, the sample points are assumed to be evenly spaced :attr:`dx` apart. The default is None.
        dx (float, optional): The spacing between sample points when :attr:`x` is None. If neither :attr:`x` nor :attr:`dx` is provided then the default is :math:`dx = 1`.
        axis (int, optional): The axis along which to integrate. The default is -1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, Definite integral of :attr:`y` is N-D tensor as approximated along a single axis by the trapezoidal rule.
        The result is an N-D tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> y = paddle.to_tensor([4, 5, 6], dtype='float32')

            >>> paddle.cumulative_trapezoid(y)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [4.50000000, 10.       ])

            >>> paddle.cumulative_trapezoid(y, dx=2.)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [9. , 20.])

            >>> y = paddle.to_tensor([4, 5, 6], dtype='float32')
            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')

            >>> paddle.cumulative_trapezoid(y, x)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [4.50000000, 10.       ])

            >>> y = paddle.to_tensor([1, 2, 3], dtype='float64')
            >>> x = paddle.to_tensor([8, 6, 4], dtype='float64')

            >>> paddle.cumulative_trapezoid(y, x)
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
            [-3., -8.])

            >>> y = paddle.arange(6).reshape((2, 3)).astype('float32')

            >>> paddle.cumulative_trapezoid(y, axis=0)
            Tensor(shape=[1, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1.50000000, 2.50000000, 3.50000000]])
            >>> paddle.cumulative_trapezoid(y, axis=1)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.50000000, 2.        ],
             [3.50000000, 8.        ]])
    """
    return _trapezoid(y, x, dx, axis, mode='cumsum')


def vander(x, n=None, increasing=False, name=None):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are powers of the input vector. The order of the powers is
    determined by the increasing boolean parameter. Specifically, when increasing is
    False, the i-th output column is the input vector raised element-wise to the power
    of N - i - 1. Such a matrix with a geometric progression in each row
    is named after Alexandre-Theophile Vandermonde.

    Args:
        x (Tensor): The input tensor, it must be 1-D Tensor, and it's data type should be ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'].
        n (int): Number of columns in the output. If n is not specified, a square array is returned (n = len(x)).
        increasing(bool): Order of the powers of the columns. If True, the powers increase from left to right, if False (the default) they are reversed.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
    Returns:
        Tensor, A vandermonde matrix with shape (len(x), N). If increasing is False, the first column is :math:`x^{(N-1)}`, the second :math:`x^{(N-2)}` and so forth.
        If increasing is True, the columns are :math:`x^0`, :math:`x^1`, ..., :math:`x^{(N-1)}`.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([1., 2., 3.], dtype="float32")
            >>> out = paddle.vander(x)
            >>> out
            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1., 1., 1.],
             [4., 2., 1.],
             [9., 3., 1.]])
            >>> out1 = paddle.vander(x,2)
            >>> out1
            Tensor(shape=[3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1., 1.],
             [2., 1.],
             [3., 1.]])
            >>> out2 = paddle.vander(x, increasing = True)
            >>> out2
            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1., 1., 1.],
             [1., 2., 4.],
             [1., 3., 9.]])
            >>> real = paddle.to_tensor([2., 4.])
            >>> imag = paddle.to_tensor([1., 3.])
            >>> complex = paddle.complex(real, imag)
            >>> out3 = paddle.vander(complex)
            >>> out3
            Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(2+1j), (1+0j)],
             [(4+3j), (1+0j)]])
    """
    check_variable_and_dtype(
        x,
        'x',
        ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
        'vander',
    )
    if x.dim() != 1:
        raise ValueError(
            "The input of x is expected to be a 1-D Tensor."
            "But now the dims of Input(X) is %d." % x.dim()
        )

    if n is None:
        n = x.shape[0]

    if n < 0:
        raise ValueError("N must be non-negative.")

    res = paddle.empty([x.shape[0], n], dtype=x.dtype)

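    # Build the matrix in increasing-power order first: column 0 is all ones and
    # the remaining columns are cumulative products of x; the column order is
    # reversed at the end when ``increasing`` is False.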
    if paddle.in_dynamic_mode():
        if n > 0:
            res[:, 0] = paddle.to_tensor([1], dtype=x.dtype)
        if n > 1:
            res[:, 1:] = x[:, None]
            res[:, 1:] = paddle.cumprod(res[:, 1:], dim=-1)
    else:
        if n > 0:
            res = paddle.static.setitem(
                res, (slice(None), 0), paddle.to_tensor([1], dtype=x.dtype)
            )
        if n > 1:
            res = paddle.static.setitem(
                res, (slice(None), slice(1, None)), x[:, None]
            )
            res = paddle.static.setitem(
                res,
                (slice(None), slice(1, None)),
                paddle.cumprod(res[:, 1:], dim=-1),
            )
    res = res[:, ::-1] if not increasing else res
    return res


def nextafter(x, y, name=None):
    r"""
    Return the next floating-point value after ``x`` towards ``y``, elementwise.
    The shapes of ``x`` and ``y`` must be broadcastable.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64.
        y (Tensor): An N-D Tensor, the data type is float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, whose shape is the result of broadcasting ``x`` and ``y``, with the same data type as the inputs.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> out = paddle.nextafter(paddle.to_tensor([1.0, 2.0]), paddle.to_tensor([2.0, 1.0]))
            >>> out
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.00000012, 1.99999988])
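            >>> # A hedged sanity check: per IEEE-754 nextafter semantics, stepping
            >>> # a value toward itself returns it unchanged (assignment only,
            >>> # nothing is printed).
            >>> same = paddle.nextafter(paddle.to_tensor([1.0]), paddle.to_tensor([1.0]))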
    """
    if in_dynamic_mode():
        return _C_ops.nextafter(x, y)
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'nextafter')
        check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'nextafter')
        op_type = "nextafter"
        helper = LayerHelper(op_type, **locals())
        inputs = {"x": x, "y": y}
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        outputs = {"out": out}
        helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
    return out


def i0(x, name=None):
    r"""
    The function used to calculate the modified Bessel function of order 0.

    Equation:
        ..  math::

            I_0(x) = \sum^{\infty}_{k=0}\frac{(x^2/4)^k}{(k!)^2}

    Args:
        x (Tensor): The input tensor, its data type should be float32 or float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        - out (Tensor), the value of the modified Bessel function of order 0 at x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
            >>> paddle.i0(x)
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.99999994 , 1.26606596 , 2.27958512 , 4.88079262 , 11.30192089])
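            >>> # I_0 is an even function (its series contains only powers of x**2),
            >>> # so i0(-x) equals i0(x); a quick symmetry check (assignment only,
            >>> # nothing is printed).
            >>> y = paddle.i0(-x)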
    """
    if in_dynamic_mode():
        return _C_ops.i0(x)
    else:
        check_variable_and_dtype(x, "x", ["float32", "float64"], "i0")

        helper = LayerHelper("i0", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='i0', inputs={'x': x}, outputs={'out': out})
    return out


@inplace_apis_in_dygraph_only
def i0_(x, name=None):
    r"""
    Inplace version of ``i0`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_i0`.
    """

    if in_dynamic_mode():
        return _C_ops.i0_(x)


def i0e(x, name=None):
    r"""
    The function used to calculate the exponentially scaled modified Bessel function of order 0.

    Equation:
        ..  math::

            I_0(x) = \sum^{\infty}_{k=0}\frac{(x^2/4)^k}{(k!)^2} \\
            I_{0e}(x) = e^{-|x|}I_0(x)

    Args:
        x (Tensor): The input tensor, its data type should be float32 or float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        - out (Tensor), the value of the exponentially scaled modified Bessel function of order 0 at x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
            >>> print(paddle.i0e(x))
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.99999994, 0.46575963, 0.30850831, 0.24300036, 0.20700191])
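            >>> # By the definition above, i0e(x) == exp(-|x|) * i0(x); recomputing
            >>> # it from i0 is a simple cross-check (assignment only, nothing is
            >>> # printed).
            >>> ref = paddle.exp(-paddle.abs(x)) * paddle.i0(x)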
    """
    if in_dynamic_mode():
        return _C_ops.i0e(x)
    else:
        check_variable_and_dtype(x, "x", ["float32", "float64"], "i0e")

        helper = LayerHelper("i0e", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='i0e', inputs={'x': x}, outputs={'out': out})
    return out


def i1(x, name=None):
    """
    The function is used to calculate the modified Bessel function of order 1.

    Args:
        x (Tensor): The input tensor, its data type should be float32 or float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        - out (Tensor), the value of the modified Bessel function of order 1 at x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
            >>> print(paddle.i1(x))
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.        , 0.56515908, 1.59063685, 3.95337057, 9.75946712])
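            >>> # I_1 is an odd function, so i1(-x) equals -i1(x); a quick symmetry
            >>> # check (assignment only, nothing is printed).
            >>> y = paddle.i1(-x)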
    """
    if in_dynamic_mode():
        return _C_ops.i1(x)
    else:
        check_variable_and_dtype(x, "x", ["float32", "float64"], "i1")

        helper = LayerHelper("i1", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='i1', inputs={'x': x}, outputs={'out': out}, attrs={}
        )
    return out


def i1e(x, name=None):
    """
    The function is used to calculate the exponentially scaled modified Bessel function of order 1.

    Args:
        x (Tensor): The input tensor, its data type should be float32 or float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        - out (Tensor), the value of the exponentially scaled modified Bessel function of order 1 at x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
            >>> print(paddle.i1e(x))
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.        , 0.20791042, 0.21526928, 0.19682673, 0.17875087])
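            >>> # By definition i1e(x) == exp(-|x|) * i1(x); recomputing it from i1
            >>> # is a simple cross-check (assignment only, nothing is printed).
            >>> ref = paddle.exp(-paddle.abs(x)) * paddle.i1(x)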
    """
    if in_dynamic_mode():
        return _C_ops.i1e(x)
    else:
        check_variable_and_dtype(x, "x", ["float32", "float64"], "i1e")

        helper = LayerHelper("i1e", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='i1e', inputs={'x': x}, outputs={'out': out}, attrs={}
        )
    return out


def polygamma(x, n, name=None):
    r"""
    Calculates the polygamma of the given input tensor, element-wise.

    The equation is:

    .. math::
        \Phi^n(x) = \frac{d^n}{dx^n} [\ln(\Gamma(x))]

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        n (int): Order of the derivative. Must be a non-negative integer.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        - out (Tensor), the polygamma of the input Tensor; the shape and data type are the same as the input.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> data = paddle.to_tensor([2, 3, 25.5], dtype='float32')
            >>> res = paddle.polygamma(data, 1)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.64493412,  0.39493406,  0.03999467])
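            >>> # Order 0 reduces to the digamma function, which is how the
            >>> # implementation below dispatches it (assignment only, no output).
            >>> d = paddle.polygamma(data, 0)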
    """
    if not isinstance(n, int):
        raise TypeError(
            "The input of n must be int type, but received: %s " % (type(n))
        )
    if n < 0:
        raise ValueError(
            "The input of n must be greater than or equal to 0. But received n = %s"
            % (n)
        )
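    # Order 0 reduces to the digamma function; higher orders dispatch to the
    # dedicated polygamma kernel.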
    if n == 0:
        return digamma(x)
    else:
        if in_dynamic_mode():
            return _C_ops.polygamma(x, n)
        else:
            check_variable_and_dtype(
                x, "x", ["float32", "float64"], "polygamma"
            )

            helper = LayerHelper("polygamma", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='polygamma',
                inputs={'x': x},
                outputs={'out': out},
                attrs={'n': n},
            )
        return out


@inplace_apis_in_dygraph_only
def polygamma_(x, n, name=None):
    r"""
    Inplace version of ``polygamma`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_polygamma`.
    """
    if not isinstance(n, int):
        raise TypeError(
            "The input of n must be int type, but received: %s " % (type(n))
        )
    if n < 0:
        raise ValueError(
            "The input of n must be greater than or equal to 0. But received n = %s"
            % (n)
        )
    if n == 0:
        return digamma_(x)
    else:
        if in_dynamic_mode():
            return _C_ops.polygamma_(x, n)


def ldexp(x, y, name=None):
    """
    Compute the result of multiplying x by 2 to the power of y. The equation is:

    .. math::
        out = x * 2^{y}

    Args:
        x (Tensor): The input Tensor, the data type is float32, float64, int32 or int64.
        y (Tensor): A Tensor of exponents, typically integers.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor. If x and y have different but broadcastable shapes, the resulting tensor shape is the shape of x and y after broadcasting; if x and y have the same shape, the output has that shape as well. The data type is float32 or float64.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> # example1
            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')
            >>> y = paddle.to_tensor([2, 3, 4], dtype='int32')
            >>> res = paddle.ldexp(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [4. , 16., 48.])

            >>> # example2
            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')
            >>> y = paddle.to_tensor([2], dtype='int32')
            >>> res = paddle.ldexp(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [4. , 8. , 12.])
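            >>> # ldexp is computed as x * 2**y (see the function body below), so
            >>> # rebuilding it by hand matches res (assignment only, no output).
            >>> ref = paddle.multiply(x, paddle.pow(paddle.to_tensor(2.0), y.astype('float32')))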

    """
    if not isinstance(x, (paddle.Tensor, Variable)):
        raise TypeError(f"x must be tensor type, but got {type(x)}")
    if not isinstance(y, (paddle.Tensor, Variable)):
        raise TypeError(f"y must be tensor type, but got {type(y)}")
    if x.dtype == paddle.float64 or y.dtype == paddle.float64:
        out_dtype = paddle.float64
    else:
        out_dtype = paddle.get_default_dtype()
    x = paddle.cast(x, dtype=out_dtype)
    y = paddle.cast(y, dtype=out_dtype)
    two = paddle.to_tensor(2, dtype=out_dtype)
    return paddle.multiply(x, paddle.pow(two, y))


def ldexp_(x, y, name=None):
    r"""
    Inplace version of ``ldexp`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_ldexp`.
    """
    if not isinstance(x, (paddle.Tensor, Variable)):
        raise TypeError(f"x must be tensor type, but got {type(x)}")
    if not isinstance(y, (paddle.Tensor, Variable)):
        raise TypeError(f"y must be tensor type, but got {type(y)}")
    if x.dtype == paddle.float64 or y.dtype == paddle.float64:
        out_dtype = paddle.float64
    else:
        out_dtype = paddle.get_default_dtype()
    x = paddle.cast_(x, dtype=out_dtype)
    y = paddle.cast(y, dtype=out_dtype)
    two = paddle.to_tensor(2, dtype=out_dtype)
    return paddle.multiply_(x, paddle.pow(two, y))