math.py 182.4 KB
Newer Older
W
WuHaobo 已提交
1
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 15 16
"""
math functions
"""
17
# TODO: define math functions
18

19
import numpy as np
20

21
import paddle
22 23 24
from paddle import _C_ops, _legacy_C_ops
from paddle.common_ops_import import VarDesc, dygraph_only, dygraph_utils

25 26 27
# TODO: define math functions
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only

28 29
from ..fluid.data_feeder import (
    check_dtype,
30 31
    check_type,
    check_variable_and_dtype,
32 33
    convert_dtype,
)
34
from ..fluid.layers import utils
35 36 37 38 39 40 41 42 43 44
from ..framework import (
    LayerHelper,
    convert_np_dtype_to_dtype_,
    core,
    in_dygraph_mode,
)
from ..static import Variable
from .creation import _complex_to_real_dtype
from .layer_function_generator import generate_layer_fn, templatedoc
from .manipulation import cast
45 46
from .ops import abs  # noqa: F401
from .ops import acos  # noqa: F401
47
from .ops import acosh  # noqa: F401
48
from .ops import asin  # noqa: F401
49 50 51
from .ops import asinh  # noqa: F401
from .ops import atan  # noqa: F401
from .ops import atanh  # noqa: F401
52 53 54 55
from .ops import ceil  # noqa: F401
from .ops import ceil_  # noqa: F401
from .ops import cos  # noqa: F401
from .ops import cosh  # noqa: F401
56
from .ops import erf  # noqa: F401
57 58 59 60 61 62 63 64 65 66 67
from .ops import exp  # noqa: F401
from .ops import exp_  # noqa: F401
from .ops import expm1  # noqa: F401
from .ops import floor  # noqa: F401
from .ops import floor_  # noqa: F401
from .ops import reciprocal  # noqa: F401
from .ops import reciprocal_  # noqa: F401
from .ops import round  # noqa: F401
from .ops import round_  # noqa: F401
from .ops import rsqrt  # noqa: F401
from .ops import rsqrt_  # noqa: F401
68 69
from .ops import sin  # noqa: F401
from .ops import sinh  # noqa: F401
70 71
from .ops import sqrt  # noqa: F401
from .ops import sqrt_  # noqa: F401
72 73
from .ops import square  # noqa: F401
from .ops import tan  # noqa: F401
74

75 76
__all__ = []

77 78 79 80 81 82 83 84 85 86 87 88 89
_supported_int_dtype_ = [
    VarDesc.VarType.UINT8,
    VarDesc.VarType.INT8,
    VarDesc.VarType.INT16,
    VarDesc.VarType.INT32,
    VarDesc.VarType.INT64,
]

_supported_float_dtype_ = [
    VarDesc.VarType.FP32,
    VarDesc.VarType.FP64,
]

90

91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128
def _get_reduce_axis(axis, x):
    """
    Internal function for max, min, amax and amin.
    It computes the attribute reduce_all value based on axis.
    """
    if axis is not None and not isinstance(axis, list):
        if isinstance(axis, (tuple, range)):
            axis = list(axis)
        elif isinstance(axis, int):
            axis = [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(
                    type(axis)
                )
            )
    if axis is None:
        axis = []
    if axis == [] or len(axis) == len(x.shape):
        reduce_all = True
    else:
        reduce_all = False
    return reduce_all, axis


def _get_reduce_axis_with_tensor(axis, x):
    if isinstance(axis, Variable):
        if axis.shape[0] == len(x.shape):
            reduce_all = True
        else:
            reduce_all = False
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        if utils._contain_var(axis):
            axis = utils._convert_to_tensor_list(axis)
    return reduce_all, axis


129 130
def log(x, name=None):
    r"""
C
Chen Long 已提交
131
    Calculates the natural log of the given input Tensor, element-wise.
132 133 134

    .. math::

135
        Out = \ln(x)
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`


    Returns:
        Tensor: The natural log of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python

            import paddle

            x = [[2,3,4], [7,8,9]]
            x = paddle.to_tensor(x, dtype='float32')
            res = paddle.log(x)
            # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
    """
    if in_dygraph_mode():
        return _C_ops.log(x)
158 159 160 161 162 163 164 165
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
        inputs = {'X': [x]}
        helper = LayerHelper('log', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
        return out
166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184


def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    Scale operator.

    Putting scale and bias to the input Tensor as following:

    ``bias_after_scale`` is True:

    .. math::
                            Out=scale*X+bias

    ``bias_after_scale`` is False:

    .. math::
                            Out=scale*(X+bias)

    Args:
185 186 187 188 189 190
        x (Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
        scale (float|Tensor): The scale factor of the input, it should be a float number or a Tensor with shape [1] and data type as float32.
        bias (float): The bias to be put on the input.
        bias_after_scale (bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
        act (str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
191 192

    Returns:
C
Chen Long 已提交
193
        Tensor: Output Tensor of scale operator, with shape and data type same as input.
194 195 196

    Examples:
        .. code-block:: python
197

198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
            # scale as a float32 number
            import paddle

            data = paddle.randn(shape=[2,3], dtype='float32')
            res = paddle.scale(data, scale=2.0, bias=1.0)

        .. code-block:: python

            # scale with parameter scale as a Tensor
            import paddle

            data = paddle.randn(shape=[2, 3], dtype='float32')
            factor = paddle.to_tensor([2], dtype='float32')
            res = paddle.scale(data, scale=factor, bias=1.0)

    """

    if in_dygraph_mode():
W
Weilong Wu 已提交
216 217
        if act is None:
            return _C_ops.scale(x, scale, float(bias), bias_after_scale)
W
wanghuancoder 已提交
218 219
        out = _C_ops.scale(x, scale, float(bias), bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out, act)
220 221
    else:
        check_variable_and_dtype(
222
            x,
223 224 225 226 227 228 229 230 231 232 233 234 235
            "x",
            [
                'float16',
                'uint16',
                'float32',
                'float64',
                'int8',
                'int16',
                'int32',
                'int64',
                'uint8',
            ],
            "scale",
236
        )
237 238 239 240 241 242 243 244 245 246 247
        inputs = {'X': [x]}
        attrs = {
            'bias': float(bias),
            'bias_after_scale': bias_after_scale,
        }
        if isinstance(scale, Variable):
            inputs['ScaleTensor'] = [scale]
        else:
            attrs['scale'] = float(scale)
        helper = LayerHelper('scale', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
248

249 250 251 252
        helper.append_op(
            type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return helper.append_activation(out)
253 254 255


def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
256 257
    r"""

258 259 260 261
    stanh activation.

    .. math::

262
        out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}
263 264 265 266 267

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale_a (float, optional): The scale factor a of the input. Default is 0.67.
        scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
268
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
269 270 271 272 273 274 275 276 277 278 279 280 281 282

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]

    """

283
    if in_dygraph_mode():
284
        return _legacy_C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
285 286 287 288
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'stanh'
        )
289

290 291 292 293 294 295 296 297 298
        helper = LayerHelper('stanh', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='stanh',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'scale_a': scale_a, 'scale_b': scale_b},
        )
        return out
299

300

301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
def multiplex(inputs, index, name=None):
    """

    Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.

    If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor, :math:`i` between :math:`[0,m)` .

    And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .

    For Example:

            .. code-block:: text

                Given:

                inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
                          [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
                          [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
                          [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]

                index = [[3],[0],[1],[2]]

                out = [[3,0,3,4],    # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
                       [0,1,3,4],    # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
                       [1,2,4,2],    # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
                       [2,3,3,4]]    # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]


    Args:
        inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
        index (Tensor): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
332
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
333

334 335 336 337 338 339 340 341
    Returns:
        Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64.

    Examples:

        .. code-block:: python

            import paddle
342

343 344 345 346
            img1 = paddle.to_tensor([[1, 2], [3, 4]], dtype=paddle.float32)
            img2 = paddle.to_tensor([[5, 6], [7, 8]], dtype=paddle.float32)
            inputs = [img1, img2]
            index = paddle.to_tensor([[1], [0]], dtype=paddle.int32)
347
            res = paddle.multiplex(inputs, index)
348
            print(res) # Tensor([[5., 6.], [3., 4.]], dtype=float32)
349 350

    """
351 352
    if in_dygraph_mode():
        return _C_ops.multiplex(inputs, index)
353 354
    else:
        helper = LayerHelper('multiplex', **locals())
355

356 357 358 359 360 361 362 363 364 365 366 367
        check_type(inputs, 'inputs', (list), 'multiplex')
        if len(inputs) < 2:
            raise ValueError(
                "inputs should be a list object with at least 2 elements."
            )
        for id, x in enumerate(inputs):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['float32', 'float64', 'int32', 'int64'],
                'multiplex',
            )
368
        check_variable_and_dtype(
369
            index, "index", ['int32', 'int64'], 'multiplex'
370
        )
371

372 373 374 375 376 377 378
        out = helper.create_variable_for_type_inference(inputs[0].dtype)
        helper.append_op(
            type='multiplex',
            inputs={'X': inputs, 'Ids': index},
            outputs={'Out': [out]},
        )
        return out
379

380

381 382 383 384 385 386
@inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    Inplace version of ``scale`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_scale`.
    """
387
    if in_dygraph_mode():
388
        return _C_ops.scale_(x, scale, float(bias), bias_after_scale)
389 390


391
def pow(x, y, name=None):
392
    """
C
Chen Long 已提交
393
    Compute the power of Tensor elements. The equation is:
S
swtkiwi 已提交
394

395
    .. math::
396
        out = x^{y}
397

398
    Note:
I
Infinity_lee 已提交
399 400 401
        ``paddle.pow`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensors
402 403


404
    Args:
405
        x (Tensor): An N-D Tensor, the data type is float16, float32, float64, int32 or int64.
406
        y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.
407
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
408

409
    Returns:
410
        N-D Tensor. A location into which the result is stored. Its dimension and data type are the same as `x`.
411 412 413

    Examples:

414
        ..  code-block:: python
415 416 417

            import paddle

418 419 420 421 422 423 424 425 426 427 428 429
            x = paddle.to_tensor([1, 2, 3], dtype='float32')

            # example 1: y is a float or int
            res = paddle.pow(x, 2)
            print(res)
            # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [1., 4., 9.])
            res = paddle.pow(x, 2.5)
            print(res)
            # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [1.         , 5.65685415 , 15.58845711])

430
            # example 2: y is a Tensor
431
            y = paddle.to_tensor([2], dtype='float32')
432
            res = paddle.pow(x, y)
433 434 435
            print(res)
            # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [1., 4., 9.])
436 437

    """
438
    # in dynamic graph mode
439
    if in_dygraph_mode():
440
        if isinstance(y, (int, float)):
441
            return _C_ops.pow(x, y)
442
        elif isinstance(y, (paddle.Tensor, Variable)):
443
            return _C_ops.elementwise_pow(x, y)
444
        else:
445
            raise TypeError(
446 447
                'y must be scalar or tensor type, but received: %s ' % (y.dtype)
            )
448 449
    else:
        # in static graph mode
450
        if isinstance(y, (int, float)):
451 452 453 454 455 456
            helper = LayerHelper('pow', **locals())
            inputs = {'X': x}
            attrs = {'factor': y}
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
457
            )
458 459 460 461 462 463
            return out
        elif isinstance(y, (paddle.Tensor, Variable)):
            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
            helper = LayerHelper('elementwise_pow', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
464
        else:
465
            raise TypeError(
466
                'y must be scalar or tensor type, but received: %s ' % (type(y))
467
            )
468 469


470
OP_NAMEMAPPING = {
471 472 473 474 475 476 477 478
    'elementwise_max': 'maximum',
    'elementwise_min': 'minimum',
    'elementwise_pow': 'elementwise_pow',
    'elementwise_floordiv': 'floor_divide',
    'elementwise_add': 'add',
    'elementwise_sub': 'subtract',
    'elementwise_mul': 'multiply',
    'elementwise_div': 'divide',
C
Chen Weihang 已提交
479
    'elementwise_mod': 'remainder',
480
}
481

482

483
@dygraph_only
484 485 486
def _elementwise_op_in_dygraph(
    x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
):
487
    def is_inplace(op_name):
488
        return op_name[-1] == "_"
489

490
    if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
491
        op = getattr(_legacy_C_ops, op_name)
492
        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
W
wanghuancoder 已提交
493 494
    else:
        if in_dygraph_mode():
495 496
            op = getattr(
                _C_ops,
497 498
                OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
            )
W
wanghuancoder 已提交
499
            out = op(x, y)
W
Weilong Wu 已提交
500 501 502 503 504 505
    if act is None:
        return out
    else:
        return dygraph_utils._append_activation_in_dygraph(
            out, act, use_mkldnn=use_mkldnn
        )
506

507 508 509 510 511 512 513

def _elementwise_op(helper):
    op_type = helper.layer_type
    original_op_type = helper.kwargs.get('original_op_type', op_type)
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)

514 515
    out = helper.kwargs.get('out', None)

516 517 518
    assert x is not None, 'x cannot be None in {}'.format(original_op_type)
    assert y is not None, 'y cannot be None in {}'.format(original_op_type)
    check_variable_and_dtype(
519 520 521 522 523
        x,
        'x',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
        original_op_type,
    )
524
    check_variable_and_dtype(
525 526 527 528 529
        y,
        'y',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
        original_op_type,
    )
530 531 532 533

    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)
534 535 536 537 538

    if out is None:
        if name is None:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
        else:
539 540 541 542 543 544 545 546 547 548
            out = helper.create_variable(
                name=name, dtype=x.dtype, persistable=False
            )

    helper.append_op(
        type=op_type,
        inputs={'X': x, 'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis, 'use_mkldnn': use_mkldnn},
    )
549 550 551
    return helper.append_activation(out)


Y
Yang Zhang 已提交
552
def add(x, y, name=None):
553
    """
554 555 556 557 558 559 560 561
    Elementwise Add Operator.
    Add two tensors element-wise
    The equation is:

    ..  math::

        Out=X+Y

562 563
    $X$ the tensor of any dimension.
    $Y$ the tensor whose dimensions must be less than or equal to the dimensions of $X$.
564 565

    There are two cases for this operator:
566 567 568 569

    1. The shape of $Y$ is the same with $X$.
    2. The shape of $Y$ is a continuous subsequence of $X$.

570
    For case 2:
571 572

    1. Broadcast $Y$ to match the shape of $X$, where axis is the start dimension index for broadcasting $Y$ onto $X$.
H
HongyuJia 已提交
573
    2. If $axis$ is -1 (default), $axis$=rank($X$)-rank($Y$).
574
    3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of subsequence, such as shape($Y$) = (2, 1) => (2).
575 576 577 578

        For example:

        ..  code-block:: python
579

580 581 582 583 584 585
            shape(X) = (2, 3, 4, 5), shape(Y) = (,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
            shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
            shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
            shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
            shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
586

587
    Args:
588 589 590
        x (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64.
        y (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be int32, int64, float32, float64.
        name (string, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
591 592

    Returns:
H
HongyuJia 已提交
593
        N-D Tensor. A location into which the result is stored. It's dimension equals with x.
594 595 596 597

    Examples:

        ..  code-block:: python
598

599
            import paddle
600

601 602 603 604
            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.to_tensor([1, 5, 2], 'float64')
            z = paddle.add(x, y)
            print(z)  # [3., 8., 6. ]
605
    """
606

J
Jiabin Yang 已提交
607
    if in_dygraph_mode():
608
        return _C_ops.add(x, y)
J
Jiabin Yang 已提交
609
    else:
610
        return _elementwise_op(LayerHelper('elementwise_add', **locals()))
611 612


613 614 615 616 617 618 619 620 621 622 623
@inplace_apis_in_dygraph_only
def add_(x, y, name=None):
    """
    Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_add`.
    """
    op_type = 'elementwise_add_'
    axis = -1

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
624
        raise ValueError(
625 626 627 628
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
629

630
    if in_dygraph_mode():
631
        return _C_ops.add_(x, y)
632
    else:
633
        out = _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
634
        return out
635 636


637 638
def subtract(x, y, name=None):
    """
W
Wei Shengyu 已提交
639
    Substract two tensors element-wise. The equation is:
640 641 642 643

    .. math::
        out = x - y

644
    Note:
I
Infinity_lee 已提交
645 646 647
        ``paddle.subtract`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
648 649 650 651 652 653 654 655 656 657 658 659

    Args:
        x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python
W
Wei Shengyu 已提交
660

661 662 663 664 665 666
            import paddle

            x = paddle.to_tensor([[1, 2], [7, 8]])
            y = paddle.to_tensor([[5, 6], [3, 4]])
            res = paddle.subtract(x, y)
            print(res)
667 668 669
            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[-4, -4],
            #         [ 4,  4]])
670 671 672 673 674

            x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            y = paddle.to_tensor([1, 0, 4])
            res = paddle.subtract(x, y)
            print(res)
675 676 677
            # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[[ 0,  2, -1],
            #          [ 0,  2, -1]]])
678

679 680
            x = paddle.to_tensor([2, float('nan'), 5], dtype='float32')
            y = paddle.to_tensor([1, 4, float('nan')], dtype='float32')
681 682
            res = paddle.subtract(x, y)
            print(res)
683 684
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [1. , nan, nan])
685

686
            x = paddle.to_tensor([5, float('inf'), -float('inf')], dtype='float64')
687 688 689
            y = paddle.to_tensor([1, 4, 5], dtype='float64')
            res = paddle.subtract(x, y)
            print(res)
690 691
            # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            #        [ 4.  ,  inf., -inf.])
692 693 694 695
    """
    op_type = 'elementwise_sub'
    axis = -1
    act = None
J
Jiabin Yang 已提交
696
    if in_dygraph_mode():
697
        return _C_ops.subtract(x, y)
J
Jiabin Yang 已提交
698
    else:
699
        return _elementwise_op(LayerHelper(op_type, **locals()))
700 701


702 703 704 705 706 707 708 709 710 711 712
@inplace_apis_in_dygraph_only
def subtract_(x, y, name=None):
    """
    Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_subtract`.
    """
    axis = -1
    act = None

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
713
        raise ValueError(
714 715 716 717
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
718

719
    if in_dygraph_mode():
720
        return _C_ops.subtract_(x, y)
721
    else:
722 723 724
        out = _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_sub_'
        )
725
        return out
726 727


728
def divide(x, y, name=None):
729
    """
730
    Divide two tensors element-wise. The equation is:
731

732 733
    .. math::
        out = x / y
734

735
    Note:
I
Infinity_lee 已提交
736 737 738
        ``paddle.divide`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
739

740 741 742 743
    Args:
        x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
744

745
    Returns:
746
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.
747

748
    Examples:
749

750
        ..  code-block:: python
751

752
            import paddle
753

754 755
            x = paddle.to_tensor([2, 3, 4], dtype='float64')
            y = paddle.to_tensor([1, 5, 2], dtype='float64')
756
            z = paddle.divide(x, y)
757
            print(z)  # [2., 0.6, 2.]
758

759 760 761 762
    """
    op_type = 'elementwise_div'
    axis = -1
    act = None
J
Jiabin Yang 已提交
763
    if in_dygraph_mode():
764
        return _C_ops.divide(x, y)
J
Jiabin Yang 已提交
765
    else:
766
        return _elementwise_op(LayerHelper(op_type, **locals()))
767 768


769 770
def floor_divide(x, y, name=None):
    """
L
Lin Manhui 已提交
771
    Floor divide two tensors element-wise and rounds the quotinents to the nearest integer toward zero. The equation is:
772

773
    .. math::
L
Lin Manhui 已提交
774
        out = trunc(x / y)
775

776
    Note:
I
Infinity_lee 已提交
777 778 779 780
        ``paddle.floor_divide`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

L
Lin Manhui 已提交
781
        Also note that the name ``floor_divide`` can be misleading, as the quotinents are actually rounded toward zero, not toward negative infinite.
782

783 784 785 786
    Args:
        x (Tensor): the input tensor, it's data type should be int32, int64.
        y (Tensor): the input tensor, it's data type should be int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
787

788 789
    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with $x$.
790

791
    Examples:
792

793
        ..  code-block:: python
794

795
            import paddle
796

797 798
            x = paddle.to_tensor([2, 3, 8, 7])
            y = paddle.to_tensor([1, 5, 3, 3])
799
            z = paddle.floor_divide(x, y)
800
            print(z)  # [2, 0, 2, 2]
801

802 803 804
    """
    op_type = 'elementwise_floordiv'
    axis = -1
805 806
    if in_dygraph_mode():
        return _C_ops.floor_divide(x, y)
807 808
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
809 810


811
def remainder(x, y, name=None):
812
    r"""
813 814 815
    Mod two tensors element-wise. The equation is:

    .. math::
816

817 818
        out = x \% y

819
    Note:
I
Infinity_lee 已提交
820 821 822
        ``paddle.remainder`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
823 824

    Args:
825 826
        x (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
827 828 829
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
830
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.
831 832 833 834 835 836 837

    Examples:

        ..  code-block:: python

            import paddle

838 839
            x = paddle.to_tensor([2, 3, 8, 7])
            y = paddle.to_tensor([1, 5, 3, 3])
840
            z = paddle.remainder(x, y)
W
WangXi 已提交
841
            print(z)  # [0, 3, 2, 1]
842 843 844

    """
    op_type = 'elementwise_mod'
845
    axis = -1
846 847 848

    if in_dygraph_mode():
        return _C_ops.remainder(x, y)
849 850
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
851 852


853 854 855 856 857 858 859 860 861 862 863 864
@inplace_apis_in_dygraph_only
def remainder_(x, y, name=None):
    r"""
    Inplace version of ``remainder`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_remainder`.
    """
    op_type = 'elementwise_mod_'
    axis = -1

    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
865 866 867 868
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
869 870 871 872

    return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)


873 874
mod = remainder  # noqa: F841
floor_mod = remainder  # noqa: F841
875 876


877
def multiply(x, y, name=None):
878
    """
879
    multiply two tensors element-wise. The equation is:
880

881 882
    .. math::
        out = x * y
883

884
    Note:
I
Infinity_lee 已提交
885 886 887
        ``paddle.multiply`` supports broadcasting. If you would like to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
888

889
    Args:
W
will-jl944 已提交
890 891
        x (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
        y (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
892
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
893

894
    Returns:
895
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.
896

897 898 899 900 901 902
    Examples:

        ..  code-block:: python

            import paddle

903 904
            x = paddle.to_tensor([[1, 2], [3, 4]])
            y = paddle.to_tensor([[5, 6], [7, 8]])
905
            res = paddle.multiply(x, y)
906
            print(res) # [[5, 12], [21, 32]]
907

908
            x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
909 910 911
            y = paddle.to_tensor([2])
            res = paddle.multiply(x, y)
            print(res) # [[[2, 4, 6], [2, 4, 6]]]
912 913 914 915

    """
    op_type = 'elementwise_mul'
    act = None
916
    axis = -1
917

J
Jiabin Yang 已提交
918
    if in_dygraph_mode():
919
        return _C_ops.multiply(x, y)
J
Jiabin Yang 已提交
920
    else:
921 922 923 924
        if x.dtype != y.dtype:
            raise TypeError(
                'Input tensors must be same type, but received type of x: %s, type of y: %s '
                % (x.dtype, y.dtype)
925
            )
926

927
        return _elementwise_op(LayerHelper(op_type, **locals()))
928

929

930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961
@dygraph_only
def _elementwise_op_with_axis_in_dygraph(
    x, y, axis=-1, name=None, op_type="Undifined"
):
    assert (
        in_dygraph_mode()
    ), "You can only call `_elementwise_op_with_axis_in_dygraph` function within in_dygraph_mode"
    assert op_type in ["add", "subtract", "multiply", "divide"], (
        "op_name input error! _elementwise_op_with_axis is an inner function to replace elementwise_add/sub/mul/div. Input op_name=%s, Expect op_name=[add|subtract|multiply|divide]\n"
        % op_type
    )
    op = getattr(_C_ops, op_type)
    x_shape = list(x.shape)
    y_shape = list(y.shape)
    if axis == -1 or len(x_shape) == len(y_shape):
        return op(x, y)
    if len(x_shape) > len(y_shape):
        padding = len(x_shape) - len(y_shape) - axis
        y = paddle.reshape(y, [1] * axis + y_shape + [1] * padding)
    else:
        padding = len(y_shape) - len(x_shape) - axis
        x = paddle.reshape(x, [1] * axis + y_shape + [1] * padding)
    return op(x, y)


def _add_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dygraph_mode():
        return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "add")
    else:
        op_type = 'elementwise_add'
        act = None
962
        return _elementwise_op(LayerHelper(op_type, **locals()))
963 964 965 966 967 968 969 970 971 972 973


def _subtract_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dygraph_mode():
        return _elementwise_op_with_axis_in_dygraph(
            x, y, axis, name, "subtract"
        )
    else:
        op_type = 'elementwise_sub'
        act = None
974
        return _elementwise_op(LayerHelper(op_type, **locals()))
975 976 977 978 979 980 981 982 983 984 985


def _multiply_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dygraph_mode():
        return _elementwise_op_with_axis_in_dygraph(
            x, y, axis, name, "multiply"
        )
    else:
        op_type = 'elementwise_mul'
        act = None
986
        return _elementwise_op(LayerHelper(op_type, **locals()))
987 988 989 990 991 992 993 994 995


def _divide_with_axis(x, y, axis=-1, name=None):
    # opt performance, only dynamic mode needs reshape
    if in_dygraph_mode():
        return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "divide")
    else:
        op_type = 'elementwise_div'
        act = None
996
        return _elementwise_op(LayerHelper(op_type, **locals()))
997 998


999
def maximum(x, y, name=None):
1000
    """
W
Wei Shengyu 已提交
1001
    Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
1002

1003 1004
    .. math::
        out = max(x, y)
1005

1006
    Note:
I
Infinity_lee 已提交
1007 1008 1009
        ``paddle.maximum`` supports broadcasting. If you want know more about broadcasting, please refer to  `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028

    Args:
        x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2], [7, 8]])
            y = paddle.to_tensor([[3, 4], [5, 6]])
            res = paddle.maximum(x, y)
            print(res)
1029 1030 1031
            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[3, 4],
            #         [7, 8]])
1032 1033 1034 1035 1036

            x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            y = paddle.to_tensor([3, 0, 4])
            res = paddle.maximum(x, y)
            print(res)
1037 1038 1039
            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[3, 2, 4],
            #         [3, 2, 4]])
1040 1041

            x = paddle.to_tensor([2, 3, 5], dtype='float32')
1042
            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
1043 1044
            res = paddle.maximum(x, y)
            print(res)
1045 1046
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [2. , nan, nan])
1047

1048 1049
            x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
1050 1051
            res = paddle.maximum(x, y)
            print(res)
1052 1053
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [5.  , 3.  , inf.])
1054 1055
    """
    op_type = 'elementwise_max'
1056
    axis = -1
1057
    act = None
1058 1059
    if in_dygraph_mode():
        return _C_ops.maximum(x, y)
1060 1061
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
1062

1063

1064
def minimum(x, y, name=None):
1065
    """
C
Chen Long 已提交
1066
    Compare two tensors and return a new tensor containing the element-wise minima. The equation is:
1067

1068 1069
    .. math::
        out = min(x, y)
1070

1071
    Note:
I
Infinity_lee 已提交
1072 1073 1074
        ``paddle.minimum`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
1075 1076 1077 1078 1079 1080 1081

    Args:
        x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
C
Chen Long 已提交
1082
        Tensor. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2], [7, 8]])
            y = paddle.to_tensor([[3, 4], [5, 6]])
            res = paddle.minimum(x, y)
            print(res)
1094 1095 1096
            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[1, 2],
            #         [5, 6]])
1097 1098 1099 1100 1101

            x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            y = paddle.to_tensor([3, 0, 4])
            res = paddle.minimum(x, y)
            print(res)
1102 1103 1104
            # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[[1, 0, 3],
            #          [1, 0, 3]]])
1105 1106

            x = paddle.to_tensor([2, 3, 5], dtype='float32')
1107
            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
1108 1109
            res = paddle.minimum(x, y)
            print(res)
1110 1111
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [1. , nan, nan])
1112

1113 1114
            x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
1115 1116
            res = paddle.minimum(x, y)
            print(res)
1117 1118
            # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            #        [ 1.  , -inf.,  5.  ])
1119 1120
    """
    op_type = 'elementwise_min'
1121
    axis = -1
1122
    act = None
1123 1124
    if in_dygraph_mode():
        return _C_ops.minimum(x, y)
1125 1126
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
1127

1128

L
LJQ❤️ 已提交
1129 1130 1131 1132 1133 1134 1135 1136 1137
def fmax(x, y, name=None):
    """
    Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the maximum value of the element.
    If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned.
    The equation is:

    .. math::
        out = fmax(x, y)

1138
    Note:
I
Infinity_lee 已提交
1139 1140 1141
        ``paddle.fmax`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
L
LJQ❤️ 已提交
1142 1143

    Args:
1144 1145
        x (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
L
LJQ❤️ 已提交
1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2], [7, 8]])
            y = paddle.to_tensor([[3, 4], [5, 6]])
            res = paddle.fmax(x, y)
            print(res)
1161 1162 1163
            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[3, 4],
            #         [7, 8]])
L
LJQ❤️ 已提交
1164 1165 1166 1167 1168

            x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            y = paddle.to_tensor([3, 0, 4])
            res = paddle.fmax(x, y)
            print(res)
1169 1170 1171
            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[3, 2, 4],
            #         [3, 2, 4]])
L
LJQ❤️ 已提交
1172 1173

            x = paddle.to_tensor([2, 3, 5], dtype='float32')
1174
            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
L
LJQ❤️ 已提交
1175 1176
            res = paddle.fmax(x, y)
            print(res)
1177 1178
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [2., 3., 5.])
L
LJQ❤️ 已提交
1179

1180 1181
            x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
L
LJQ❤️ 已提交
1182 1183
            res = paddle.fmax(x, y)
            print(res)
1184 1185
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [5.  , 3.  , inf.])
L
LJQ❤️ 已提交
1186 1187 1188 1189
    """
    op_type = 'elementwise_fmax'
    axis = -1
    act = None
1190
    if in_dygraph_mode():
1191
        return _C_ops.fmax(x, y)
1192 1193
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
L
LJQ❤️ 已提交
1194

1195

L
LJQ❤️ 已提交
1196 1197 1198 1199 1200 1201 1202 1203 1204
def fmin(x, y, name=None):
    """
    Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the minimum value of the element.
    If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned.
    The equation is:

    .. math::
        out = fmin(x, y)

1205
    Note:
I
Infinity_lee 已提交
1206 1207 1208
        ``paddle.fmin`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
L
LJQ❤️ 已提交
1209 1210

    Args:
1211 1212
        x (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float16, float32, float64, int32, int64.
L
LJQ❤️ 已提交
1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape,  its shape is the same as x and y.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2], [7, 8]])
            y = paddle.to_tensor([[3, 4], [5, 6]])
            res = paddle.fmin(x, y)
            print(res)
1228 1229 1230
            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[1, 2],
            #         [5, 6]])
L
LJQ❤️ 已提交
1231 1232 1233 1234 1235

            x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
            y = paddle.to_tensor([3, 0, 4])
            res = paddle.fmin(x, y)
            print(res)
1236 1237 1238
            # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[[1, 0, 3],
            #          [1, 0, 3]]])
L
LJQ❤️ 已提交
1239 1240

            x = paddle.to_tensor([2, 3, 5], dtype='float32')
1241
            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
L
LJQ❤️ 已提交
1242 1243
            res = paddle.fmin(x, y)
            print(res)
1244 1245
            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [1., 3., 5.])
L
LJQ❤️ 已提交
1246

1247 1248
            x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
L
LJQ❤️ 已提交
1249 1250
            res = paddle.fmin(x, y)
            print(res)
1251 1252
            # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
            #        [ 1.  , -inf.,  5.  ])
L
LJQ❤️ 已提交
1253 1254 1255 1256
    """
    op_type = 'elementwise_fmin'
    axis = -1
    act = None
1257
    if in_dygraph_mode():
1258
        return _C_ops.fmin(x, y)
1259 1260
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
L
LJQ❤️ 已提交
1261

Y
Yang Zhang 已提交
1262

1263
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
1264 1265 1266 1267
    """
    Computes the sum of tensor elements over the given dimension.

    Args:
1268
        x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
1269 1270
        axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`x` and return a
N
Noel 已提交
1271
            Tensor with a single element, otherwise must be in the
1272 1273 1274 1275 1276 1277 1278
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
1279
            value is False.
1280
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1281 1282

    Returns:
1283
        Tensor: Results of summation operation on the specified axis of input Tensor `x`,
1284
        if `x.dtype='bool'`, `x.dtype='int32'`, it's data type is `'int64'`,
1285
        otherwise it's data type is the same as `x`.
1286 1287 1288 1289 1290

    Examples:
        .. code-block:: python

            import paddle
1291

1292
            # x is a Tensor with following elements:
1293 1294 1295
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
1296 1297
            x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
                                  [0.1, 0.2, 0.6, 0.7]])
1298
            out1 = paddle.sum(x)  # [3.5]
1299 1300 1301
            out2 = paddle.sum(x, axis=0)  # [0.3, 0.5, 1.1, 1.6]
            out3 = paddle.sum(x, axis=-1)  # [1.9, 1.6]
            out4 = paddle.sum(x, axis=1, keepdim=True)  # [[1.9], [1.6]]
1302

1303
            # y is a Tensor with shape [2, 2, 2] and elements as below:
1304 1305 1306
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
1307
            y = paddle.to_tensor([[[1, 2], [3, 4]],
1308
                                  [[5, 6], [7, 8]]])
1309 1310
            out5 = paddle.sum(y, axis=[1, 2]) # [10, 26]
            out6 = paddle.sum(y, axis=[0, 1]) # [16, 20]
1311

1312 1313 1314 1315 1316 1317 1318 1319 1320
            # x is a Tensor with following elements:
            #    [[True, True, True, True]
            #     [False, False, False, False]]
            # Each example is followed by the corresponding output tensor.
            x = paddle.to_tensor([[True, True, True, True],
                                  [False, False, False, False]])
            out7 = paddle.sum(x)  # [4]
            out8 = paddle.sum(x, axis=0)  # [1, 1, 1, 1]
            out9 = paddle.sum(x, axis=1)  # [4, 0]
1321
    """
1322

1323 1324 1325 1326
    dtype_flag = False
    if dtype is not None:
        dtype_flag = True
        dtype = convert_np_dtype_to_dtype_(dtype)
F
From00 已提交
1327 1328

    if in_dygraph_mode():
1329
        return _C_ops.sum(x, axis, dtype, keepdim)
1330 1331 1332
    else:
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
F
From00 已提交
1333

1334
        if dtype_flag:
1335
            attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype})
W
wanghuancoder 已提交
1336

1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352
        check_variable_and_dtype(
            x,
            'x',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'sum',
        )
1353

1354 1355 1356
        check_type(
            axis, 'axis', (int, list, tuple, type(None), Variable), 'sum'
        )
1357

1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369
        helper = LayerHelper('sum', **locals())
        if dtype_flag:
            out = helper.create_variable_for_type_inference(dtype=dtype)
        else:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_sum',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out
1370

1371

1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
    """
    Replaces NaN, positive infinity, and negative infinity values in input tensor.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64.
        nan (float, optional): the value to replace NaNs with. Default is 0.
        posinf (float, optional): if a Number, the value to replace positive infinity values with. If None, positive infinity values are replaced with the greatest finite value representable by input’s dtype. Default is None.
        neginf (float, optional): if a Number, the value to replace negative infinity values with. If None, negative infinity values are replaced with the lowest finite value representable by input’s dtype. Default is None.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of nan_to_num operation input Tensor ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([float('nan'), 0.3, float('+inf'), float('-inf')], dtype='float32')
            out1 = paddle.nan_to_num(x)  # [0, 0.3, 3.4028235e+38, -3.4028235e+38]
            out2 = paddle.nan_to_num(x, nan=1)  # [1, 0.3, 3.4028235e+38, -3.4028235e+38]
            out3 = paddle.nan_to_num(x, posinf=5)  # [0, 0.3, 5, -3.4028235e+38]
            out4 = paddle.nan_to_num(x, nan=10, neginf=-99)  # [10, 0.3, 3.4028235e+38, -99]
    """
    # NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of python float number
    # incorrectly, so we have to explicitly contruct tensors here
    posinf_value = paddle.full_like(x, float("+inf"))
    neginf_value = paddle.full_like(x, float("-inf"))
    nan = paddle.full_like(x, nan)
    assert x.dtype in [paddle.float32, paddle.float64]
    is_float32 = x.dtype == paddle.float32
    if posinf is None:
        posinf = (
            np.finfo(np.float32).max if is_float32 else np.finfo(np.float64).max
        )
    posinf = paddle.full_like(x, posinf)
    if neginf is None:
        neginf = (
            np.finfo(np.float32).min if is_float32 else np.finfo(np.float64).min
        )
    neginf = paddle.full_like(x, neginf)
    x = paddle.where(paddle.isnan(x), nan, x)
    x = paddle.where(x == posinf_value, posinf, x)
    x = paddle.where(x == neginf_value, neginf, x)
    return x


W
wangguanqun 已提交
1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
    """
    Computes the sum of tensor elements over the given axis, treating Not a Numbers (NaNs) as zero.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the nansum is performed. If
            :attr:`None`, nansum all elements of :attr:`x` and return a
            Tensor with a single element, otherwise must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
1437
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
W
wangguanqun 已提交
1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450

    Returns:
        Tensor: Results of summation operation on the specified axis of input Tensor `x`,

    Examples:
        .. code-block:: python

            import paddle

            # x is a Tensor with following elements:
            #    [[nan, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, -nan, 0.7]]
            # Each example is followed by the corresponding output tensor.
1451 1452
            x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
                            [0.1, 0.2, float('-nan'), 0.7]],dtype="float32")
W
wangguanqun 已提交
1453 1454 1455 1456 1457 1458 1459 1460 1461
            out1 = paddle.nansum(x)  # [2.7]
            out2 = paddle.nansum(x, axis=0)  # [0.1, 0.5, 0.5, 1.6]
            out3 = paddle.nansum(x, axis=-1)  # [1.7, 1.0]
            out4 = paddle.nansum(x, axis=1, keepdim=True)  # [[1.7], [1.0]]

            # y is a Tensor with shape [2, 2, 2] and elements as below:
            #      [[[1, nan], [3, 4]],
            #      [[5, 6], [-nan, 8]]]
            # Each example is followed by the corresponding output tensor.
1462
            y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
W
wangguanqun 已提交
1463 1464 1465 1466
                            [[5, 6], [float('-nan'), 8]]])
            out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
            out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
    """
1467 1468 1469
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'nansum'
    )
W
wangguanqun 已提交
1470 1471 1472 1473 1474 1475 1476
    check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum')

    zero_tensor = paddle.zeros_like(x)
    tmp_tensor = paddle.where(isnan(x), zero_tensor, x)
    return sum(tmp_tensor, axis, dtype, keepdim, name)


1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533
def nanmean(x, axis=None, keepdim=False, name=None):
    r"""
    Compute the arithmetic mean along the specified axis, ignoring NaNs.

    Args:
        x (Tensor): The input Tensor with data type uint16, float16, float32, float64.
        axis (int|list|tuple, optional):The axis along which to perform nanmean
            calculations. ``axis`` should be int, list(int) or tuple(int). If
            ``axis`` is a list/tuple of dimension(s), nanmean is calculated along
            all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``
            should be in range [-D, D), where D is the dimensions of ``x`` . If
            ``axis`` or element(s) of ``axis`` is less than 0, it works the
            same way as :math:`axis + D` . If ``axis`` is None, nanmean is
            calculated over all elements of ``x``. Default is None.
        keepdim (bool, optional): Whether to reserve the reduced dimension(s)
            in the output Tensor. If ``keepdim`` is True, the dimensions of
            the output Tensor is the same as ``x`` except in the reduced
            dimensions(it is of size 1 in this case). Otherwise, the shape of
            the output Tensor is squeezed in ``axis`` . Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, results of arithmetic mean along ``axis`` of ``x``, with the same data
        type as ``x``.

    Examples:

        .. code-block:: python
            :name: code-example1

            import paddle
            # x is a 2-D Tensor:
            x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
                                  [0.1, 0.2, float('-nan'), 0.7]])
            out1 = paddle.nanmean(x)
            # [0.44999996]
            out2 = paddle.nanmean(x, axis=0)
            # [0.1, 0.25, 0.5, 0.79999995]
            out3 = paddle.nanmean(x, axis=0, keepdim=True)
            # [[0.1, 0.25, 0.5, 0.79999995]]
            out4 = paddle.nanmean(x, axis=1)
            # [0.56666666 0.33333334]
            out5 = paddle.nanmean(x, axis=1, keepdim=True)
            # [[0.56666666]
            #  [0.33333334]]

            # y is a 3-D Tensor:
            y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
                                   [[5, 6], [float('-nan'), 8]]])
            out6 = paddle.nanmean(y, axis=[1, 2])
            # [2.66666675, 6.33333349]
            out7 = paddle.nanmean(y, axis=[0, 1])
            # [3., 6.]
    """
    if isinstance(axis, int):
        axis = [axis]
1534 1535 1536
    check_variable_and_dtype(
        x, 'x/input', ['uint16', 'float16', 'float32', 'float64'], 'nanmean'
    )
1537 1538 1539
    if axis is not None:
        check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean')

1540 1541 1542
    cnt = paddle.sum(~paddle.isnan(x), axis=axis, keepdim=keepdim)
    return paddle.divide(
        paddle.nansum(x, axis=axis, keepdim=keepdim, name=name),
1543 1544
        cnt.astype(x.dtype),
    )
1545 1546


1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600
def count_nonzero(x, axis=None, keepdim=False, name=None):
    r"""
    Counts the number of non-zero values in the tensor x along the specified axis.

    Args:
        x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`x` and return a
            Tensor with a single element, otherwise must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of count operation on the specified axis of input Tensor `x`, it's data type is `'int64'`.

    Examples:

        .. code-block:: python

            import paddle
            # x is a 2-D Tensor:
            x = paddle.to_tensor([[0., 1.1, 1.2], [0., 0., 1.3], [0., 0., 0.]])
            out1 = paddle.count_nonzero(x)
            # [3]
            out2 = paddle.count_nonzero(x, axis=0)
            # [0, 1, 2]
            out3 = paddle.count_nonzero(x, axis=0, keepdim=True)
            # [[0, 1, 2]]
            out4 = paddle.count_nonzero(x, axis=1)
            # [2, 1, 0]
            out5 = paddle.count_nonzero(x, axis=1, keepdim=True)
            #[[2],
            # [1],
            # [0]]

            # y is a 3-D Tensor:
            y = paddle.to_tensor([[[0., 1.1, 1.2], [0., 0., 1.3], [0., 0., 0.]],
                                  [[0., 2.5, 2.6], [0., 0., 2.4], [2.1, 2.2, 2.3]]])
            out6 = paddle.count_nonzero(y, axis=[1, 2])
            # [3, 6]
            out7 = paddle.count_nonzero(y, axis=[0, 1])
            # [1, 3, 5]
    """

    if axis is not None:
        if isinstance(axis, int):
            axis = [axis]
        dims = len(x.shape)
        for i in range(len(axis)):
1601 1602 1603
            if not isinstance(axis[i], int) or not (
                axis[i] < dims and axis[i] >= -dims
            ):
1604 1605 1606 1607 1608 1609 1610 1611 1612
                raise ValueError(
                    "Axis should be None, int, or a list, element should in range [-rank(x), rank(x))."
                )

    bool_tensor = paddle.cast(x, 'bool')
    int_tensor = paddle.cast(bool_tensor, 'int64')
    return paddle.sum(int_tensor, axis=axis, keepdim=keepdim, name=name)


1613
@templatedoc(op_type="sum")
S
Steffy-zxf 已提交
1614
def add_n(inputs, name=None):
1615
    """
1616
    Sum one or more Tensor of the input.
1617

S
Steffy-zxf 已提交
1618 1619 1620
    For example:

    .. code-block:: text
1621

S
Steffy-zxf 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634
        Case 1:

            Input:
                input.shape = [2, 3]
                input = [[1, 2, 3],
                         [4, 5, 6]]

            Output:
                output.shape = [2, 3]
                output = [[1, 2, 3],
                          [4, 5, 6]]

        Case 2:
1635

S
Steffy-zxf 已提交
1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
            Input:
                First input:
                    input1.shape = [2, 3]
                    Input1 = [[1, 2, 3],
                              [4, 5, 6]]

                The second input:
                    input2.shape = [2, 3]
                    input2 = [[7, 8, 9],
                              [10, 11, 12]]

                Output:
                    output.shape = [2, 3]
                    output = [[8, 10, 12],
                              [14, 16, 18]]
1651 1652

    Args:
1653
        inputs (Tensor|list[Tensor]|tuple[Tensor]):  A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.
S
Steffy-zxf 已提交
1654
            Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.
1655
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1656 1657

    Returns:
S
Steffy-zxf 已提交
1658
        Tensor, the sum of input :math:`inputs` , its shape and data types are consistent with :math:`inputs`.
1659 1660 1661

    Examples:
        .. code-block:: python
1662

1663 1664
            import paddle

S
Steffy-zxf 已提交
1665 1666 1667
            input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
            input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')
            output = paddle.add_n([input0, input1])
1668
            # [[8., 10., 12.],
S
Steffy-zxf 已提交
1669
            #  [14., 16., 18.]]
1670
    """
1671 1672 1673
    if in_dygraph_mode():
        if isinstance(inputs, Variable):
            inputs = [inputs]
1674
        return _C_ops.add_n(inputs)
1675
    else:
1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693
        helper = LayerHelper('add_n', **locals())
        check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
        if isinstance(inputs, list) or isinstance(inputs, tuple):
            if len(inputs) > 0:
                for input in inputs:
                    check_variable_and_dtype(
                        input,
                        "inputs",
                        ['float16', 'float32', 'float64', 'int32', 'int64'],
                        'add_n',
                    )
        else:
            check_variable_and_dtype(
                inputs,
                "inputs",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'add_n',
            )
1694

1695 1696 1697 1698 1699 1700 1701 1702 1703
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype('inputs')
        )
        helper.append_op(
            type='sum',
            inputs={'X': inputs},
            outputs={'Out': out},
            attrs={'use_mkldnn': False},
        )
1704

1705
        return out
1706 1707


1708 1709 1710
def trunc(input, name=None):
    '''
    This API is used to returns a new tensor with the truncated integer values of input.
1711

1712 1713 1714
    Args:
        input (Tensor): The input tensor, it's data type should be int32, int64, float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1715

1716 1717
    Returns:
        Tensor: The output Tensor of trunc.
1718

1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735
    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand([2,2],'float32')
            print(input)
            # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [[0.02331470, 0.42374918],
            #         [0.79647720, 0.74970269]])

            output = paddle.trunc(input)
            print(output)
            # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [[0., 0.],
            #         [0., 0.]]))
    '''
J
Jiabin Yang 已提交
1736
    if in_dygraph_mode():
1737
        return _C_ops.trunc(input)
1738
    else:
1739 1740
        inputs = {"X": input}
        attrs = {}
1741

1742 1743 1744 1745 1746
        helper = LayerHelper("trunc", **locals())
        check_variable_and_dtype(
            input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc'
        )
        out = helper.create_variable_for_type_inference(dtype=input.dtype)
1747

1748 1749 1750 1751
        helper.append_op(
            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out
1752 1753


W
WuHaobo 已提交
1754
def mm(input, mat2, name=None):
1755
    """
S
swtkiwi 已提交
1756

1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767
    Applies matrix multiplication to two tensors.

    Currently, the input tensors' rank can be any, but when the rank of any
    inputs is bigger than 3, this two inputs' rank should be equal.


    Also note that if the raw tensor :math:`x` or :math:`mat2` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
1768
        input (Tensor): The input tensor which is a Tensor.
N
Noel 已提交
1769
        mat2 (Tensor): The input tensor which is a Tensor.
1770
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1771 1772

    Returns:
N
Noel 已提交
1773
        Tensor: The product Tensor.
1774

W
wawltor 已提交
1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806
    ::

        * example 1:

        input: [B, ..., M, K], mat2: [B, ..., K, N]
        out: [B, ..., M, N]

        * example 2:

        input: [B, M, K], mat2: [B, K, N]
        out: [B, M, N]

        * example 3:

        input: [B, M, K], mat2: [K, N]
        out: [B, M, N]

        * example 4:

        input: [M, K], mat2: [K, N]
        out: [M, N]

        * example 5:

        input: [B, M, K], mat2: [K]
        out: [B, M]

        * example 6:

        input: [K], mat2: [K]
        out: [1]

1807 1808 1809 1810
    Examples:
        .. code-block:: python

            import paddle
1811 1812 1813 1814 1815 1816 1817 1818
            input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
            mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
            out = paddle.mm(input, mat2)
            print(out)
            #        [[11., 14., 17., 20.],
            #         [23., 30., 37., 44.],
            #         [35., 46., 57., 68.]])

N
Noel 已提交
1819

1820
    """
1821
    if in_dygraph_mode():
1822
        return _C_ops.matmul(input, mat2, False, False)
1823
    else:
1824

1825 1826 1827 1828 1829
        def __check_input(x, y):
            var_names = {'x': x, 'y': y}
            for name, val in var_names.items():
                check_variable_and_dtype(
                    val, name, ['float16', 'float32', 'float64'], 'mm'
1830
                )
1831 1832 1833 1834 1835 1836
            x_shape = list(x.shape)
            y_shape = list(y.shape)
            if len(x_shape) == 1:
                x_shape = [1] + x_shape
            if len(y_shape) == 1:
                y_shape = y_shape + [1]
1837

1838 1839 1840
            # check the inner 2 dimensions
            if x_shape[-1] != y_shape[-2]:
                if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
1841
                    raise ValueError(
1842 1843 1844 1845
                        "After performing an optional transpose, Input X's width should be "
                        "equal to Y's width for multiplication "
                        "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                        % (x_shape, y_shape)
1846
                    )
1847

1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870
            if len(y_shape) > 2 and len(x_shape) > 2:
                for i, dim_x in enumerate(x_shape[:-2]):
                    # don't check neg shape
                    if dim_x < 0 or y_shape[i] < 0:
                        continue
                    if dim_x != y_shape[i]:
                        raise ValueError(
                            "When the matrix is larger than 2 dimensions, the higher "
                            "dimensional values of the two matrices need to be equal. "
                            "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                            "Y's shape: %s.\n" % (i, i, x_shape, y_shape)
                        )

        __check_input(input, mat2)

        helper = LayerHelper('mm', **locals())
        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type='matmul_v2',
            inputs={'X': input, 'Y': mat2},
            outputs={'Out': out},
        )
        return out
1871

1872

Y
yaoxuefeng 已提交
1873
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
1874 1875 1876
    """
    **addmm**

1877
    Perform matrix multiplication for input $x$ and $y$.
1878 1879 1880 1881 1882 1883 1884 1885 1886
    $input$ is added to the final result.
    The equation is:

    ..  math::
        Out = alpha * x * y + beta * input

    $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.

    Args:
Y
yaoxuefeng 已提交
1887 1888 1889
        input (Tensor): The input Tensor to be added to the final result.
        x (Tensor): The first input Tensor for matrix multiplication.
        y (Tensor): The second input Tensor for matrix multiplication.
1890 1891
        beta (float, optional): Coefficient of $input$, default is 1.
        alpha (float, optional): Coefficient of $x*y$, default is 1.
1892
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
1893 1894

    Returns:
1895
        Tensor: The output Tensor of addmm.
1896 1897 1898

    Examples:
        ..  code-block:: python
1899

1900 1901
            import paddle

Y
yaoxuefeng 已提交
1902 1903 1904
            x = paddle.ones([2,2])
            y = paddle.ones([2,2])
            input = paddle.ones([2,2])
Y
yaoxuefeng 已提交
1905

Y
yaoxuefeng 已提交
1906
            out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
Y
yaoxuefeng 已提交
1907

N
Noel 已提交
1908
            print(out)
1909 1910 1911
            # [[10.5 10.5]
            # [10.5 10.5]]
    """
Y
yaoxuefeng 已提交
1912 1913 1914
    input_shape = input.shape
    x_shape = x.shape
    y_shape = y.shape
1915
    if not len(x_shape) == len(y_shape) == 2:
1916
        raise ValueError(
1917 1918 1919 1920
            "The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format(
                x_shape, y_shape
            )
        )
Y
yaoxuefeng 已提交
1921
    if x_shape[1] != y_shape[0]:
1922
        raise ValueError(
1923 1924 1925 1926
            "The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(
                x_shape, y_shape
            )
        )
1927 1928 1929
    if len(input_shape) == 2:
        if input_shape[0] != x_shape[0]:
            if input_shape[0] != 1:
1930
                raise ValueError(
1931 1932 1933 1934
                    "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(
                        input_shape[0]
                    )
                )
1935
            if input_shape[1] != y_shape[1] and input_shape[1] != 1:
1936
                raise ValueError(
1937 1938 1939 1940
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
1941 1942
        if input_shape[1] != y_shape[1]:
            if input_shape[1] != 1:
1943
                raise ValueError(
1944 1945 1946 1947
                    "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(
                        input_shape[1]
                    )
                )
1948 1949
    elif len(input_shape) == 1:
        if input_shape[0] not in (y_shape[1], 1):
1950
            raise ValueError(
1951 1952 1953 1954
                "The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format(
                    input_shape, x_shape[0], y_shape[1]
                )
            )
1955
    else:
1956
        raise ValueError(
1957 1958 1959 1960
            "The dimention of input should be 2 or 1 but receive input's shape: {}".format(
                input_shape
            )
        )
Y
yaoxuefeng 已提交
1961

J
Jiabin Yang 已提交
1962
    if in_dygraph_mode():
1963
        return _C_ops.addmm(input, x, y, beta, alpha)
J
Jiabin Yang 已提交
1964
    else:
1965 1966
        inputs = {'Input': input, "X": x, "Y": y}
        attrs = {'Alpha': alpha, 'Beta': beta}
1967

1968 1969 1970 1971 1972 1973 1974
        helper = LayerHelper("addmm", **locals())
        check_variable_and_dtype(
            input, 'Input', ['float32', 'float64'], 'addmm'
        )
        check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
        check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
1975

1976 1977 1978 1979
        helper.append_op(
            type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out
1980

1981

S
seemingwang 已提交
1982 1983 1984 1985 1986 1987 1988
def renorm(x, p, axis, max_norm):
    """
    **renorm**

    This operator is used to calculate the p-norm along the axis,
    suppose the input-shape on axis dimension has the value of T, then
    the tensor is split into T parts, the p-norm should be calculated for each
1989
    part, if the p-norm for part i is larger than max-norm, then each element
S
seemingwang 已提交
1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003
    in part i should be re-normalized at the same scale so that part-i' p-norm equals
    max-norm exactly, otherwise part-i stays unchanged.

    Args:
        x (Tensor): The input Tensor
        p (float): The power of the norm operation.
        axis (int): the dimension to slice the tensor.
        max-norm (float): the maximal norm limit.

    Returns:
        Tensor: the renorm Tensor.

    Examples:
        ..  code-block:: python
2004

S
seemingwang 已提交
2005 2006 2007 2008
            import paddle
            input = [[[2.0,2,-2],[3,0.3,3]],[[2,-8,2],[3.1,3.7,3]]]
            x = paddle.to_tensor(input,dtype='float32')
            y = paddle.renorm(x, 1.0, 2, 2.05)
2009
            print(y)
S
seemingwang 已提交
2010 2011 2012 2013
    #        [[[ 0.40594056,  0.29285714, -0.41000000],
    #          [ 0.60891086,  0.04392857,  0.61500001]],
    #         [[ 0.40594056, -1.17142856,  0.41000000],
    #          [ 0.62920785,  0.54178572,  0.61500001]]])
2014

S
seemingwang 已提交
2015 2016 2017
    """
    input_shape = x.shape
    if not axis < len(input_shape):
2018 2019
        raise ValueError(
            "the axis:{} should be less then the shape's size {}:{}".format(
2020 2021 2022
                axis, len(input_shape), input_shape
            )
        )
2023
    if not axis >= 0:
S
seemingwang 已提交
2024
        if not axis >= -1 * len(input_shape):
2025
            raise ValueError(
2026 2027 2028 2029
                "the axis:{} should not be less than -1 * length of input_shape:{}".format(
                    axis, -1 * len(input_shape)
                )
            )
S
seemingwang 已提交
2030
        axis = axis + len(input_shape)
S
seemingwang 已提交
2031
    if in_dygraph_mode():
2032
        out = _C_ops.renorm(x, p, axis, max_norm)
S
seemingwang 已提交
2033
        return out
2034
    else:
2035
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
2036 2037
        inputs = {'X': x}
        attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
S
seemingwang 已提交
2038

2039 2040
        helper = LayerHelper("renorm", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
S
seemingwang 已提交
2041

2042 2043 2044 2045
        helper.append_op(
            type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}
        )
        return out
S
seemingwang 已提交
2046

2047

Z
zhiboniu 已提交
2048 2049 2050 2051
def inner(x, y, name=None):
    """

    Inner product of two input Tensor.
2052

Z
zhiboniu 已提交
2053 2054 2055 2056 2057
    Ordinary inner product for 1-D Tensors, in higher dimensions a sum product over the last axes.

    Args:
        x (Tensor): An N-D Tensor or a Scalar Tensor. If its not a scalar Tensor, its last dimensions must match y's.
        y (Tensor): An N-D Tensor or a Scalar Tensor. If its not a scalar Tensor, its last dimensions must match x's.
2058
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Z
zhiboniu 已提交
2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080

    Returns:
        Tensor: The inner-product Tensor, the output shape is x.shape[:-1] + y.shape[:-1].

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.arange(1, 7).reshape((2, 3)).astype('float32')
            y = paddle.arange(1, 10).reshape((3, 3)).astype('float32')
            out = paddle.inner(x, y)
            print(out)
            #        ([[14, 32, 50],
            #         [32, 77, 122]])


    """
    if x.size == 1 or y.size == 1:
        return multiply(x, y)
    else:
        xshape = x.shape
        yshape = y.shape
2081 2082
        dstshape = list(xshape[:-1]) + list(yshape[:-1])
        if len(dstshape) == 0:
Z
zhiboniu 已提交
2083 2084 2085 2086
            dstshape = [1]
        nx = x.reshape((-1, xshape[-1]))
        ny = y.reshape((-1, yshape[-1]))

2087
        if in_dygraph_mode():
2088
            return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape)
2089
        else:
Z
zhiboniu 已提交
2090

2091 2092 2093 2094 2095
            def __check_input(x, y):
                var_names = {'x': x, 'y': y}
                for name, val in var_names.items():
                    check_variable_and_dtype(
                        val, name, ['float16', 'float32', 'float64'], 'inner'
2096
                    )
2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119
                x_shape = list(xshape)
                y_shape = list(yshape)

                # check the inner 2 dimensions
                if x_shape[-1] != y_shape[-1]:
                    if not ((x_shape[-1] == -1) or (y_shape[-1] == -1)):
                        raise ValueError(
                            "After performing an optional transpose, Input X's last dim should be "
                            "equal to Y's last dim for multiplication "
                            "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                            % (x_shape, y_shape)
                        )

            __check_input(nx, ny)

            helper = LayerHelper('inner', **locals())
            out = helper.create_variable_for_type_inference(dtype=nx.dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': nx, 'Y': ny.T},
                outputs={'Out': out},
            )
            return out.reshape(dstshape)
Z
zhiboniu 已提交
2120 2121 2122 2123 2124 2125 2126 2127 2128 2129


def outer(x, y, name=None):
    """

    Outer product of two Tensors.

    Input is flattened if not already 1-dimensional.

    Args:
2130 2131
        x (Tensor): An N-D Tensor or a Scalar Tensor.
        y (Tensor): An N-D Tensor or a Scalar Tensor.
2132
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Z
zhiboniu 已提交
2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153

    Returns:
        Tensor: The outer-product Tensor.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.arange(1, 4).astype('float32')
            y = paddle.arange(1, 6).astype('float32')
            out = paddle.outer(x, y)
            print(out)
            #        ([[1, 2, 3, 4, 5],
            #         [2, 4, 6, 8, 10],
            #         [3, 6, 9, 12, 15]])


    """
    nx = x.reshape((-1, 1))
    ny = y.reshape((1, -1))

2154
    if in_dygraph_mode():
2155
        return _C_ops.matmul(nx, ny, False, False)
2156
    else:
Z
zhiboniu 已提交
2157

2158 2159 2160 2161 2162 2163
        def __check_input(x, y):
            var_names = {'x': x, 'y': y}
            for name, val in var_names.items():
                check_variable_and_dtype(
                    val, name, ['float16', 'float32', 'float64'], 'inner'
                )
Z
zhiboniu 已提交
2164

2165
        __check_input(nx, ny)
Z
zhiboniu 已提交
2166

2167 2168 2169 2170 2171 2172
        helper = LayerHelper('outer', **locals())
        out = helper.create_variable_for_type_inference(dtype=nx.dtype)
        helper.append_op(
            type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out}
        )
        return out
Z
zhiboniu 已提交
2173 2174


2175
def logsumexp(x, axis=None, keepdim=False, name=None):
2176
    r"""
2177
    Calculates the log of the sum of exponentials of ``x`` along ``axis`` .
2178

2179
    .. math::
2180
       logsumexp(x) = \log\sum exp(x)
2181

2182
    Args:
2183
        x (Tensor): The input Tensor with data type float32 or float64, which
S
Shang Zhizhou 已提交
2184
            have no more than 4 dimensions.
2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200
        axis (int|list|tuple, optional): The axis along which to perform
            logsumexp calculations. ``axis`` should be int, list(int) or
            tuple(int). If ``axis`` is a list/tuple of dimension(s), logsumexp
            is calculated along all element(s) of ``axis`` . ``axis`` or
            element(s) of ``axis`` should be in range [-D, D), where D is the
            dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is
            less than 0, it works the same way as :math:`axis + D` . If
            ``axis`` is None, logsumexp is calculated along all elements of
            ``x``. Default is None.
        keepdim (bool, optional): Whether to reserve the reduced dimension(s)
            in the output Tensor. If ``keep_dim`` is True, the dimensions of
            the output Tensor is the same as ``x`` except in the reduced
            dimensions(it is of size 1 in this case). Otherwise, the shape of
            the output Tensor is squeezed in ``axis`` . Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
2201

2202
    Returns:
2203 2204
        Tensor, results of logsumexp along ``axis`` of ``x``, with the same data
        type as ``x``.
2205

2206
    Examples:
2207

2208
    .. code-block:: python
2209

2210 2211
        import paddle

2212
        x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])
2213 2214
        out1 = paddle.logsumexp(x) # [3.4691226]
        out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602]
2215 2216

    """
2217
    reduce_all, axis = _get_reduce_axis(axis, x)
2218

2219
    if in_dygraph_mode():
2220
        return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
2221 2222 2223 2224 2225 2226 2227 2228
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')

        helper = LayerHelper('logsumexp', **locals())
        attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
2229
        )
2230
        return out
2231

S
swtkiwi 已提交
2232

2233 2234
def inverse(x, name=None):
    """
2235 2236 2237 2238 2239
    Takes the inverse of the square matrix. A square matrix is a matrix with
    the same number of rows and columns. The input can be a square matrix
    (2-D Tensor) or batches of square matrices.

    Args:
2240
        x (Tensor): The input tensor. The last two
2241 2242 2243
            dimensions should be equal. When the number of dimensions is
            greater than 2, it is treated as batches of square matrix. The data
            type can be float32 and float64.
2244
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2245 2246

    Returns:
2247
        Tensor: A Tensor holds the inverse of x. The shape and data type
2248
                        is the same as x.
2249 2250 2251 2252 2253

    Examples:
        .. code-block:: python

            import paddle
2254 2255

            mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
2256 2257
            inv = paddle.inverse(mat)
            print(inv) # [[0.5, 0], [0, 0.5]]
2258 2259

    """
2260
    if in_dygraph_mode():
W
wanghuancoder 已提交
2261
        return _C_ops.inverse(x)
2262
    else:
2263

2264 2265 2266 2267 2268 2269 2270 2271
        def _check_input(x):
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
            if len(x.shape) < 2:
                raise ValueError(
                    "The input of inverse is expected to be a Tensor whose number "
                    "of dimensions is no less than 2. But reviced: %d, "
                    "x's shape: %s." % (len(x.shape), x.shape)
                )
2272

2273 2274 2275 2276 2277 2278 2279
        _check_input(x)
        helper = LayerHelper('inverse', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]}
        )
        return out
2280

2281

2282
def max(x, axis=None, keepdim=False, name=None):
2283
    """
S
swtkiwi 已提交
2284

2285
    Computes the maximum of tensor elements over the given axis.
2286

T
Tao Luo 已提交
2287 2288
    Note:
        The difference between max and amax is: If there are multiple maximum elements,
2289
        amax evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2290 2291 2292
        while max propagates gradient to all of them.


2293
    Args:
2294 2295
        x (Tensor): A tensor, the data type is float32, float64, int32, int64.
        axis (int|list|tuple, optional): The axis along which the maximum is computed.
2296
            If :attr:`None`, compute the maximum over all elements of
N
Noel 已提交
2297
            `x` and return a Tensor with a single element,
2298 2299
            otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
2300
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
2301
            output Tensor. The result tensor will have one fewer dimension
2302
            than the `x` unless :attr:`keepdim` is true, default
2303
            value is False.
2304
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2305 2306

    Returns:
2307
        Tensor, results of maximum on the specified axis of input tensor,
2308
        it's data type is the same as `x`.
2309 2310 2311

    Examples:
        .. code-block:: python
2312

2313
            import paddle
2314

N
Noel 已提交
2315
            # data_x is a Tensor with shape [2, 4]
2316
            # the axis is a int element
2317
            x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
2318
                                  [0.1, 0.2, 0.6, 0.7]],
2319
                                 dtype='float64', stop_gradient=False)
2320
            result1 = paddle.max(x)
2321
            result1.backward()
2322
            print(result1, x.grad)
2323 2324 2325
            #[0.9], [[0., 0., 0., 1.], [0., 0., 0., 0.]]

            x.clear_grad()
2326
            result2 = paddle.max(x, axis=0)
2327
            result2.backward()
2328
            print(result2, x.grad)
2329 2330 2331
            #[0.2, 0.3, 0.6, 0.9], [[1., 1., 0., 1.], [0., 0., 1., 0.]]

            x.clear_grad()
2332
            result3 = paddle.max(x, axis=-1)
2333
            result3.backward()
2334
            print(result3, x.grad)
2335 2336 2337
            #[0.9, 0.7], [[0., 0., 0., 1.], [0., 0., 0., 1.]]

            x.clear_grad()
2338
            result4 = paddle.max(x, axis=1, keepdim=True)
2339
            result4.backward()
2340
            print(result4, x.grad)
2341
            #[[0.9], [0.7]], [[0., 0., 0., 1.], [0., 0., 0., 1.]]
2342

N
Noel 已提交
2343
            # data_y is a Tensor with shape [2, 2, 2]
2344
            # the axis is list
2345
            y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
2346 2347
                                  [[5.0, 6.0], [7.0, 8.0]]],
                                 dtype='float64', stop_gradient=False)
2348
            result5 = paddle.max(y, axis=[1, 2])
2349
            result5.backward()
2350
            print(result5, y.grad)
2351 2352 2353
            #[4., 8.], [[[0., 0.], [0., 1.]], [[0., 0.], [0., 1.]]]

            y.clear_grad()
2354
            result6 = paddle.max(y, axis=[0, 1])
2355
            result6.backward()
2356
            print(result6, y.grad)
2357
            #[7., 8.], [[[0., 0.], [0., 0.]], [[0., 0.], [1., 1.]]]
2358 2359
    """

2360
    if in_dygraph_mode():
2361
        return _C_ops.max(x, axis, keepdim)
2362 2363 2364 2365 2366
    else:
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        helper = LayerHelper('max', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
2367
        )
2368 2369
        if not isinstance(axis, Variable) and utils._contain_var(axis):
            axis = utils._convert_to_tensor_list(axis)
2370

2371 2372 2373 2374 2375 2376 2377 2378
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_max',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out
2379

2380

2381
def min(x, axis=None, keepdim=False, name=None):
2382
    """
S
swtkiwi 已提交
2383

2384
    Computes the minimum of tensor elements over the given axis
2385

T
Tao Luo 已提交
2386 2387
    Note:
        The difference between min and amin is: If there are multiple minimum elements,
2388
        amin evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2389 2390
        while min propagates gradient to all of them.

2391
    Args:
2392 2393
        x (Tensor): A tensor, the data type is float32, float64, int32, int64.
        axis (int|list|tuple, optional): The axis along which the minimum is computed.
2394
            If :attr:`None`, compute the minimum over all elements of
N
Noel 已提交
2395
            `x` and return a Tensor with a single element,
2396 2397
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
2398
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
2399
            output Tensor. The result tensor will have one fewer dimension
2400
            than the `x` unless :attr:`keepdim` is true, default
2401
            value is False.
2402
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2403

2404
    Returns:
2405
        Tensor, results of minimum on the specified axis of input tensor,
2406
        it's data type is the same as input's Tensor.
2407

2408 2409 2410
    Examples:
        .. code-block:: python

2411
            import paddle
2412

2413
            # data_x is a Tensor with shape [2, 4]
2414
            # the axis is a int element
2415
            x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
2416
                                  [0.1, 0.2, 0.6, 0.7]],
2417
                                 dtype='float64', stop_gradient=False)
2418
            result1 = paddle.min(x)
2419
            result1.backward()
2420
            print(result1, x.grad)
2421 2422 2423
            #[0.1], [[0., 0., 0., 0.], [1., 0., 0., 0.]]

            x.clear_grad()
2424
            result2 = paddle.min(x, axis=0)
2425
            result2.backward()
2426
            print(result2, x.grad)
2427 2428 2429
            #[0.1, 0.2, 0.5, 0.7], [[0., 0., 1., 0.], [1., 1., 0., 1.]]

            x.clear_grad()
2430
            result3 = paddle.min(x, axis=-1)
2431
            result3.backward()
2432
            print(result3, x.grad)
2433 2434 2435
            #[0.2, 0.1], [[1., 0., 0., 0.], [1., 0., 0., 0.]]

            x.clear_grad()
2436
            result4 = paddle.min(x, axis=1, keepdim=True)
2437
            result4.backward()
2438
            print(result4, x.grad)
2439
            #[[0.2], [0.1]], [[1., 0., 0., 0.], [1., 0., 0., 0.]]
2440

2441
            # data_y is a Tensor with shape [2, 2, 2]
2442
            # the axis is list
2443
            y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
2444 2445
                                  [[5.0, 6.0], [7.0, 8.0]]],
                                 dtype='float64', stop_gradient=False)
2446
            result5 = paddle.min(y, axis=[1, 2])
2447
            result5.backward()
2448
            print(result5, y.grad)
2449 2450 2451
            #[1., 5.], [[[1., 0.], [0., 0.]], [[1., 0.], [0., 0.]]]

            y.clear_grad()
2452
            result6 = paddle.min(y, axis=[0, 1])
2453
            result6.backward()
2454
            print(result6, y.grad)
2455
            #[1., 2.], [[[1., 1.], [0., 0.]], [[0., 0.], [0., 0.]]]
2456
    """
2457

2458
    if in_dygraph_mode():
2459
        return _C_ops.min(x, axis, keepdim)
2460 2461 2462 2463 2464
    else:
        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
        helper = LayerHelper('min', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min'
2465
        )
2466

2467 2468 2469 2470 2471 2472 2473 2474
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_min',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out
2475

2476

T
Tao Luo 已提交
2477 2478 2479 2480 2481 2482
def amax(x, axis=None, keepdim=False, name=None):
    """
    Computes the maximum of tensor elements over the given axis.

    Note:
        The difference between max and amax is: If there are multiple maximum elements,
2483
        amax evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2484 2485 2486
        while max propagates gradient to all of them.

    Args:
2487
        x (Tensor): A tensor, the data type is float32, float64, int32, int64,
2488
            the dimension is no more than 4.
2489
        axis (int|list|tuple, optional): The axis along which the maximum is computed.
T
Tao Luo 已提交
2490 2491 2492 2493
            If :attr:`None`, compute the maximum over all elements of
            `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
2494
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
T
Tao Luo 已提交
2495 2496 2497
            output Tensor. The result tensor will have one fewer dimension
            than the `x` unless :attr:`keepdim` is true, default
            value is False.
2498
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
T
Tao Luo 已提交
2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511

    Returns:
        Tensor, results of maximum on the specified axis of input tensor,
        it's data type is the same as `x`.

    Examples:
        .. code-block:: python

            import paddle
            # data_x is a Tensor with shape [2, 4] with multiple maximum elements
            # the axis is a int element

            x = paddle.to_tensor([[0.1, 0.9, 0.9, 0.9],
2512
                                  [0.9, 0.9, 0.6, 0.7]],
T
Tao Luo 已提交
2513
                                 dtype='float64', stop_gradient=False)
2514 2515
            # There are 5 maximum elements:
            # 1) amax evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2516
            #    thus the corresponding gradients are 1/5=0.2;
2517
            # 2) while max propagates gradient to all of them,
T
Tao Luo 已提交
2518
            #    thus the corresponding gradient are 1.
T
Tao Luo 已提交
2519 2520
            result1 = paddle.amax(x)
            result1.backward()
2521
            print(result1, x.grad)
T
Tao Luo 已提交
2522 2523
            #[0.9], [[0., 0.2, 0.2, 0.2], [0.2, 0.2, 0., 0.]]

T
Tao Luo 已提交
2524 2525 2526
            x.clear_grad()
            result1_max = paddle.max(x)
            result1_max.backward()
2527
            print(result1_max, x.grad)
T
Tao Luo 已提交
2528 2529 2530 2531
            #[0.9], [[0., 1.0, 1.0, 1.0], [1.0, 1.0, 0., 0.]]

            ###############################

T
Tao Luo 已提交
2532 2533 2534
            x.clear_grad()
            result2 = paddle.amax(x, axis=0)
            result2.backward()
2535
            print(result2, x.grad)
T
Tao Luo 已提交
2536 2537 2538 2539 2540
            #[0.9, 0.9, 0.9, 0.9], [[0., 0.5, 1., 1.], [1., 0.5, 0., 0.]]

            x.clear_grad()
            result3 = paddle.amax(x, axis=-1)
            result3.backward()
2541
            print(result3, x.grad)
T
Tao Luo 已提交
2542 2543 2544 2545 2546
            #[0.9, 0.9], [[0., 0.3333, 0.3333, 0.3333], [0.5, 0.5, 0., 0.]]

            x.clear_grad()
            result4 = paddle.amax(x, axis=1, keepdim=True)
            result4.backward()
2547
            print(result4, x.grad)
T
Tao Luo 已提交
2548 2549 2550
            #[[0.9], [0.9]], [[0., 0.3333, 0.3333, 0.3333.], [0.5, 0.5, 0., 0.]]

            # data_y is a Tensor with shape [2, 2, 2]
2551
            # the axis is list
T
Tao Luo 已提交
2552 2553 2554 2555 2556
            y = paddle.to_tensor([[[0.1, 0.9], [0.9, 0.9]],
                                  [[0.9, 0.9], [0.6, 0.7]]],
                                 dtype='float64', stop_gradient=False)
            result5 = paddle.amax(y, axis=[1, 2])
            result5.backward()
2557
            print(result5, y.grad)
T
Tao Luo 已提交
2558 2559 2560 2561 2562
            #[0.9., 0.9], [[[0., 0.3333], [0.3333, 0.3333]], [[0.5, 0.5], [0., 1.]]]

            y.clear_grad()
            result6 = paddle.amax(y, axis=[0, 1])
            result6.backward()
2563
            print(result6, y.grad)
T
Tao Luo 已提交
2564 2565
            #[0.9., 0.9], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]]
    """
2566
    if in_dygraph_mode():
2567
        return _C_ops.amax(x, axis, keepdim)
2568

2569 2570 2571 2572 2573
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        helper = LayerHelper('amax', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
2574
        )
T
Tao Luo 已提交
2575

2576 2577 2578 2579 2580 2581 2582 2583
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_amax',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out
T
Tao Luo 已提交
2584

2585

T
Tao Luo 已提交
2586 2587 2588 2589 2590 2591 2592
def amin(x, axis=None, keepdim=False, name=None):
    """

    Computes the minimum of tensor elements over the given axis

    Note:
        The difference between min and amin is: If there are multiple minimum elements,
2593
        amin evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2594 2595 2596
        while min propagates gradient to all of them.

    Args:
2597
        x (Tensor): A tensor, the data type is float32, float64, int32, int64,
2598
            the dimension is no more than 4.
2599
        axis (int|list|tuple, optional): The axis along which the minimum is computed.
T
Tao Luo 已提交
2600 2601 2602 2603
            If :attr:`None`, compute the minimum over all elements of
            `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
2604
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
T
Tao Luo 已提交
2605 2606 2607
            output Tensor. The result tensor will have one fewer dimension
            than the `x` unless :attr:`keepdim` is true, default
            value is False.
2608
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
T
Tao Luo 已提交
2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621

    Returns:
        Tensor, results of minimum on the specified axis of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle
            # data_x is a Tensor with shape [2, 4] with multiple minimum elements
            # the axis is a int element

            x = paddle.to_tensor([[0.2, 0.1, 0.1, 0.1],
2622
                                  [0.1, 0.1, 0.6, 0.7]],
T
Tao Luo 已提交
2623
                                 dtype='float64', stop_gradient=False)
2624 2625
            # There are 5 minimum elements:
            # 1) amin evenly distributes gradient between these equal values,
T
Tao Luo 已提交
2626
            #    thus the corresponding gradients are 1/5=0.2;
2627
            # 2) while min propagates gradient to all of them,
T
Tao Luo 已提交
2628
            #    thus the corresponding gradient are 1.
T
Tao Luo 已提交
2629 2630
            result1 = paddle.amin(x)
            result1.backward()
2631
            print(result1, x.grad)
T
Tao Luo 已提交
2632 2633
            #[0.1], [[0., 0.2, 0.2, 0.2], [0.2, 0.2, 0., 0.]]

T
Tao Luo 已提交
2634 2635 2636
            x.clear_grad()
            result1_min = paddle.min(x)
            result1_min.backward()
2637
            print(result1_min, x.grad)
T
Tao Luo 已提交
2638 2639 2640 2641
            #[0.1], [[0., 1.0, 1.0, 1.0], [1.0, 1.0, 0., 0.]]

            ###############################

T
Tao Luo 已提交
2642 2643 2644
            x.clear_grad()
            result2 = paddle.amin(x, axis=0)
            result2.backward()
2645
            print(result2, x.grad)
T
Tao Luo 已提交
2646 2647 2648 2649 2650
            #[0.1, 0.1, 0.1, 0.1], [[0., 0.5, 1., 1.], [1., 0.5, 0., 0.]]

            x.clear_grad()
            result3 = paddle.amin(x, axis=-1)
            result3.backward()
2651
            print(result3, x.grad)
T
Tao Luo 已提交
2652 2653 2654 2655 2656
            #[0.1, 0.1], [[0., 0.3333, 0.3333, 0.3333], [0.5, 0.5, 0., 0.]]

            x.clear_grad()
            result4 = paddle.amin(x, axis=1, keepdim=True)
            result4.backward()
2657
            print(result4, x.grad)
T
Tao Luo 已提交
2658 2659 2660
            #[[0.1], [0.1]], [[0., 0.3333, 0.3333, 0.3333.], [0.5, 0.5, 0., 0.]]

            # data_y is a Tensor with shape [2, 2, 2]
2661
            # the axis is list
T
Tao Luo 已提交
2662 2663 2664 2665 2666
            y = paddle.to_tensor([[[0.2, 0.1], [0.1, 0.1]],
                                  [[0.1, 0.1], [0.6, 0.7]]],
                                 dtype='float64', stop_gradient=False)
            result5 = paddle.amin(y, axis=[1, 2])
            result5.backward()
2667
            print(result5, y.grad)
T
Tao Luo 已提交
2668 2669 2670 2671 2672
            #[0.1., 0.1], [[[0., 0.3333], [0.3333, 0.3333]], [[0.5, 0.5], [0., 1.]]]

            y.clear_grad()
            result6 = paddle.amin(y, axis=[0, 1])
            result6.backward()
2673
            print(result6, y.grad)
T
Tao Luo 已提交
2674 2675
            #[0.1., 0.1], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]]
    """
2676
    if in_dygraph_mode():
2677
        return _C_ops.amin(x, axis, keepdim)
2678

2679 2680 2681 2682 2683
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        helper = LayerHelper('amin', **locals())
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
2684
        )
T
Tao Luo 已提交
2685

2686 2687 2688 2689 2690 2691 2692 2693
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_amin',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out
T
Tao Luo 已提交
2694

2695

W
WuHaobo 已提交
2696
def log1p(x, name=None):
2697
    r"""
2698
    Calculates the natural log of the given input tensor, element-wise.
N
Noel 已提交
2699

2700
    .. math::
2701
        Out = \ln(x+1)
S
Steffy-zxf 已提交
2702

2703
    Args:
S
Steffy-zxf 已提交
2704
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
2705
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2706

2707
    Returns:
S
Steffy-zxf 已提交
2708
        Tensor, the natural log of the input Tensor computed element-wise.
2709

2710 2711
    Examples:
        .. code-block:: python
S
Steffy-zxf 已提交
2712

2713
            import paddle
S
Steffy-zxf 已提交
2714 2715 2716 2717

            data = paddle.to_tensor([[0], [1]], dtype='float32')
            res = paddle.log1p(data)
            # [[0.], [0.6931472]]
2718 2719
    """

2720
    if in_dygraph_mode():
W
wanghuancoder 已提交
2721
        return _C_ops.log1p(x)
2722 2723 2724 2725 2726 2727 2728 2729
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
        inputs = {'X': [x]}
        helper = LayerHelper('log1p', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
        return out
B
Bai Yifan 已提交
2730

2731

J
joejiong 已提交
2732
def log2(x, name=None):
2733
    r"""
J
joejiong 已提交
2734 2735 2736 2737
    Calculates the log to the base 2 of the given input tensor, element-wise.

    .. math::

2738
        Out = \log_2x
J
joejiong 已提交
2739 2740 2741

    Args:
        x (Tensor): Input tensor must be one of the following types: float32, float64.
2742
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
J
joejiong 已提交
2743 2744 2745 2746 2747 2748 2749 2750


    Returns:
        Tensor: The log to the base 2 of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python
2751

J
joejiong 已提交
2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769
            import paddle

            # example 1: x is a float
            x_i = paddle.to_tensor([[1.0], [2.0]])
            res = paddle.log2(x_i) # [[0.], [1.0]]

            # example 2: x is float32
            x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')
            paddle.to_tensor(x_i)
            res = paddle.log2(x_i)
            print(res) # [1.0]

            # example 3: x is float64
            x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')
            paddle.to_tensor(x_i)
            res = paddle.log2(x_i)
            print(res) # [1.0]
    """
2770
    if in_dygraph_mode():
W
wanghuancoder 已提交
2771
        return _C_ops.log2(x)
2772 2773 2774 2775 2776 2777 2778 2779 2780 2781
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], "log2"
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log2', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
        return out
W
WuHaobo 已提交
2782

J
joejiong 已提交
2783 2784

def log10(x, name=None):
2785
    r"""
J
joejiong 已提交
2786 2787 2788 2789
    Calculates the log to the base 10 of the given input tensor, element-wise.

    .. math::

2790
        Out = \log_10_x
J
joejiong 已提交
2791 2792 2793

    Args:
        x (Tensor): Input tensor must be one of the following types: float32, float64.
2794
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
J
joejiong 已提交
2795 2796 2797 2798 2799 2800 2801 2802


    Returns:
        Tensor: The log to the base 10 of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python
2803

J
joejiong 已提交
2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821
            import paddle

            # example 1: x is a float
            x_i = paddle.to_tensor([[1.0], [10.0]])
            res = paddle.log10(x_i) # [[0.], [1.0]]

            # example 2: x is float32
            x_i = paddle.full(shape=[1], fill_value=10, dtype='float32')
            paddle.to_tensor(x_i)
            res = paddle.log10(x_i)
            print(res) # [1.0]

            # example 3: x is float64
            x_i = paddle.full(shape=[1], fill_value=10, dtype='float64')
            paddle.to_tensor(x_i)
            res = paddle.log10(x_i)
            print(res) # [1.0]
    """
2822
    if in_dygraph_mode():
W
wanghuancoder 已提交
2823
        return _C_ops.log10(x)
2824 2825 2826 2827 2828 2829 2830 2831 2832 2833
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], "log10"
        )
        inputs = {'X': [x]}
        helper = LayerHelper('log10', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
        return out
J
joejiong 已提交
2834 2835


Y
Yang Zhang 已提交
2836
def clip(x, min=None, max=None, name=None):
2837
    """
Y
Yang Zhang 已提交
2838
    This operator clip all elements in input into the range [ min, max ] and return
2839 2840 2841 2842
    a resulting tensor as the following equation:

    .. math::

2843
        Out = MIN(MAX(x, min), max)
2844 2845

    Args:
2846
        x (Tensor): An N-D Tensor with data type float32, float64, int32 or int64.
2847
        min (float|int|Tensor, optional): The lower bound with type ``float`` , ``int`` or a ``Tensor``
2848
            with shape [1] and type ``int32``, ``float32``, ``float64``.
2849
        max (float|int|Tensor, optional): The upper bound with type ``float``, ``int`` or a ``Tensor``
2850
            with shape [1] and type ``int32``, ``float32``, ``float64``.
2851
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
2852 2853

    Returns:
Y
Yang Zhang 已提交
2854
        Tensor: A Tensor with the same data type and data shape as input.
2855 2856 2857 2858 2859

    Examples:
        .. code-block:: python

            import paddle
N
Noel 已提交
2860

2861
            x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
Y
Yang Zhang 已提交
2862 2863
            out1 = paddle.clip(x1, min=3.5, max=5.0)
            out2 = paddle.clip(x1, min=2.5)
2864
            print(out1)
Y
Yang Zhang 已提交
2865 2866
            # [[3.5, 3.5]
            # [4.5, 5.0]]
2867
            print(out2)
Y
Yang Zhang 已提交
2868 2869
            # [[2.5, 3.5]
            # [[4.5, 6.4]
2870 2871
    """

2872 2873 2874 2875 2876 2877 2878 2879 2880 2881
    x_dtype = str(x.dtype)
    if x_dtype == 'paddle.int32':
        min_ = np.iinfo(np.int32).min
        max_ = np.iinfo(np.int32).max - 2**7
    elif x_dtype == 'paddle.int64':
        min_ = np.iinfo(np.int64).min
        max_ = np.iinfo(np.int64).max - 2**39
    else:
        min_ = float(np.finfo(np.float32).min)
        max_ = float(np.finfo(np.float32).max)
2882

C
chentianyu03 已提交
2883 2884 2885 2886 2887 2888 2889
    if in_dygraph_mode():
        if isinstance(min, Variable):
            min = min.numpy().item(0)
        if isinstance(max, Variable):
            max = max.numpy().item(0)
        min = min_ if min is None else min
        max = max_ if max is None else max
2890
        return _C_ops.clip(x, min, max)
2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911
    else:
        if min is not None:
            check_type(min, 'min', (float, int, Variable), 'clip')
            if isinstance(min, Variable):
                check_dtype(
                    min.dtype,
                    'min',
                    ['float32', 'float64', 'int32'],
                    'clip',
                    '(When the type of min in clip is Variable.)',
                )
        if max is not None:
            check_type(max, 'max', (float, int, Variable), 'clip')
            if isinstance(max, Variable):
                check_dtype(
                    max.dtype,
                    'max',
                    ['float32', 'float64', 'int32'],
                    'clip',
                    '(When the type of max in clip is Variable.)',
                )
C
chentianyu03 已提交
2912

2913 2914 2915
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip'
        )
Y
Yang Zhang 已提交
2916

2917 2918
        inputs = {'X': x}
        attrs = {'min': min_, 'max': max_}
2919

2920 2921 2922 2923 2924
        if isinstance(min, Variable):
            min.stop_gradient = True
            inputs['Min'] = min
        elif min is not None:
            attrs['min'] = min
2925

2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938
        if isinstance(max, Variable):
            max.stop_gradient = True
            inputs['Max'] = max
        elif max is not None:
            attrs['max'] = max

        helper = LayerHelper('clip', **locals())
        output = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype('x')
        )
        helper.append_op(
            type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
        )
2939

2940
        return output
F
Feiyu Chan 已提交
2941

W
WuHaobo 已提交
2942

2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956
@inplace_apis_in_dygraph_only
def clip_(x, min=None, max=None, name=None):
    """
    Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_clip`.
    """
    fmin = float(np.finfo(np.float32).min)
    fmax = float(np.finfo(np.float32).max)
    if isinstance(min, Variable):
        min = min.numpy().item(0)
    if isinstance(max, Variable):
        max = max.numpy().item(0)
    min = fmin if min is None else min
    max = fmax if max is None else max
C
chentianyu03 已提交
2957 2958

    if in_dygraph_mode():
2959
        return _C_ops.clip_(x, min, max)
C
chentianyu03 已提交
2960

2961

2962
def trace(x, offset=0, axis1=0, axis2=1, name=None):
L
Li Fuchen 已提交
2963
    """
S
swtkiwi 已提交
2964

2965
    Computes the sum along diagonals of the input tensor x.
2966 2967

    If ``x`` is 2D, returns the sum of diagonal.
L
Li Fuchen 已提交
2968

2969
    If ``x`` has larger dimensions, then returns an tensor of diagonals sum, diagonals be taken from
2970
    the 2D planes specified by axis1 and axis2. By default, the 2D planes formed by the first and second axes
2971
    of the input tensor x.
L
Li Fuchen 已提交
2972

2973
    The argument ``offset`` determines where diagonals are taken from input tensor x:
L
Li Fuchen 已提交
2974 2975 2976 2977

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.
2978
    - Note that if offset is out of input's shape indicated by axis1 and axis2, 0 will be returned.
2979

L
Li Fuchen 已提交
2980
    Args:
2981 2982 2983 2984 2985
        x (Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
        offset (int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
        axis1 (int, optional): The first axis with respect to take diagonal. Default: 0.
        axis2 (int, optional): The second axis with respect to take diagonal. Default: 1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
L
Li Fuchen 已提交
2986 2987

    Returns:
2988
        Tensor: the output data type is the same as input data type.
L
Li Fuchen 已提交
2989 2990 2991 2992 2993

    Examples:
        .. code-block:: python

            import paddle
2994

2995 2996 2997
            case1 = paddle.randn([2, 3])
            case2 = paddle.randn([3, 10, 10])
            case3 = paddle.randn([3, 10, 5, 10])
2998 2999 3000
            data1 = paddle.trace(case1) # data1.shape = [1]
            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
L
Li Fuchen 已提交
3001
    """
3002

Z
zyfncg 已提交
3003
    def __check_input(x, offset, axis1, axis2):
3004 3005 3006 3007 3008 3009
        check_dtype(
            x.dtype,
            'Input',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'trace',
        )
L
Li Fuchen 已提交
3010

3011
        input_shape = list(x.shape)
3012 3013 3014 3015
        assert len(input_shape) >= 2, (
            "The x must be at least 2-dimensional, "
            "But received Input x's dimensional: %s.\n" % len(input_shape)
        )
L
Li Fuchen 已提交
3016

3017 3018
        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
L
Li Fuchen 已提交
3019

3020 3021
        assert (0 <= axis1_) and (axis1_ < len(input_shape)), (
            "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
3022
            % (-(len(input_shape)), len(input_shape) - 1, axis1)
3023
        )
L
Li Fuchen 已提交
3024

3025 3026
        assert (0 <= axis2_) and (axis2_ < len(input_shape)), (
            "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
3027
            % (-(len(input_shape)), len(input_shape) - 1, axis2)
3028
        )
L
Li Fuchen 已提交
3029

3030 3031 3032 3033
        assert axis1_ != axis2_, (
            "axis1 and axis2 cannot be the same axis."
            "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
        )
L
Li Fuchen 已提交
3034

H
hong 已提交
3035
    if in_dygraph_mode():
3036
        return _C_ops.trace(x, offset, axis1, axis2)
3037 3038
    else:
        __check_input(x, offset, axis1, axis2)
H
hong 已提交
3039

3040 3041
        helper = LayerHelper('trace', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
L
Li Fuchen 已提交
3042

3043 3044 3045 3046 3047 3048 3049
        helper.append_op(
            type='trace',
            inputs={'Input': [x]},
            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
            outputs={'Out': [out]},
        )
        return out
L
Li Fuchen 已提交
3050

3051

3052 3053 3054 3055 3056
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
    """
    This OP computes the diagonals of the input tensor x.

    If ``x`` is 2D, returns the diagonal.
3057
    If ``x`` has larger dimensions, diagonals be taken from the 2D planes specified by axis1 and axis2.
3058 3059 3060 3061 3062 3063 3064
    By default, the 2D planes formed by the first and second axis of the input tensor x.

    The argument ``offset`` determines where diagonals are taken from input tensor x:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.
3065

3066
    Args:
3067 3068 3069 3070 3071
        x (Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be bool, int32, int64, float16, float32, float64.
        offset (int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
        axis1 (int, optional): The first axis with respect to take diagonal. Default: 0.
        axis2 (int, optional): The second axis with respect to take diagonal. Default: 1.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114

    Returns:
        Tensor: a partial view of input tensor in specify two dimensions, the output data type is the same as input data type.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([2,2,3],'float32')
            print(x)
            # Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [[[0.45661032, 0.03751532, 0.90191704],
            #          [0.43760979, 0.86177313, 0.65221709]],

            #         [[0.17020577, 0.00259554, 0.28954273],
            #          [0.51795638, 0.27325270, 0.18117726]]])

            out1 = paddle.diagonal(x)
            print(out1)
            #Tensor(shape=[3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[0.45661032, 0.51795638],
            #        [0.03751532, 0.27325270],
            #        [0.90191704, 0.18117726]])

            out2 = paddle.diagonal(x, offset=0, axis1=2, axis2=1)
            print(out2)
            #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[0.45661032, 0.86177313],
            #        [0.17020577, 0.27325270]])

            out3 = paddle.diagonal(x, offset=1, axis1=0, axis2=1)
            print(out3)
            #Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[0.43760979],
            #        [0.86177313],
            #        [0.65221709]])

            out4 = paddle.diagonal(x, offset=0, axis1=1, axis2=2)
            print(out4)
            #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[0.45661032, 0.86177313],
            #        [0.17020577, 0.27325270]])
3115

3116
    """
J
Jiabin Yang 已提交
3117
    if in_dygraph_mode():
3118
        return _C_ops.diagonal(x, offset, axis1, axis2)
J
Jiabin Yang 已提交
3119
    else:
W
wanghuancoder 已提交
3120

3121 3122 3123 3124 3125 3126 3127
        def __check_input(x, offset, axis1, axis2):
            check_dtype(
                x.dtype,
                'Input',
                ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
                'diagonal',
            )
3128

3129 3130 3131 3132 3133
            input_shape = list(x.shape)
            assert len(input_shape) >= 2, (
                "The x must be at least 2-dimensional, "
                "But received Input x's dimensional: %s.\n" % len(input_shape)
            )
3134

3135 3136
            axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
            axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
3137

3138 3139 3140 3141
            assert axis1_ < len(input_shape), (
                "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                % (-(len(input_shape)), len(input_shape) - 1, axis1)
            )
3142

3143 3144 3145 3146
            assert axis2_ < len(input_shape), (
                "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                % (-(len(input_shape)), len(input_shape) - 1, axis2)
            )
3147

3148 3149 3150 3151
            assert axis1_ != axis2_, (
                "axis1 and axis2 cannot be the same axis."
                "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
            )
3152

3153 3154 3155
        __check_input(x, offset, axis1, axis2)
        helper = LayerHelper('diagonal', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
3156

3157 3158 3159 3160 3161 3162 3163
        helper.append_op(
            type='diagonal',
            inputs={'Input': [x]},
            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
            outputs={'Out': [out]},
        )
        return out
3164 3165


F
Feiyu Chan 已提交
3166
@templatedoc(op_type="kron")
W
WuHaobo 已提交
3167
def kron(x, y, name=None):
S
swtkiwi 已提交
3168 3169
    """

3170
    ${comment}
F
Feiyu Chan 已提交
3171 3172

    Args:
3173 3174
        x (Tensor): the fist operand of kron op, data type: float16, float32, float64, int32 or int64.
        y (Tensor): the second operand of kron op, data type: float16, float32, float64, int32 or int64. Its data type should be the same with x.
3175
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
F
Feiyu Chan 已提交
3176 3177

    Returns:
3178
        Tensor: The output of kron, data type: float16, float32, float64, int32 or int64. Its data is the same with x.
F
Feiyu Chan 已提交
3179 3180 3181

    Examples:
        .. code-block:: python
3182

3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193
            import paddle
            x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
            y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
            out = paddle.kron(x, y)
            print(out)
            #        [[1, 2, 3, 2, 4, 6],
            #         [ 4,  5,  6,  8, 10, 12],
            #         [ 7,  8,  9, 14, 16, 18],
            #         [ 3,  6,  9,  4,  8, 12],
            #         [12, 15, 18, 16, 20, 24],
            #         [21, 24, 27, 28, 32, 36]])
F
Feiyu Chan 已提交
3194
    """
3195
    if in_dygraph_mode():
3196 3197 3198 3199 3200 3201 3202 3203 3204
        return _legacy_C_ops.kron(x, y)
    else:
        helper = LayerHelper('kron', **locals())
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
        )
        check_variable_and_dtype(
            y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
        )
F
Feiyu Chan 已提交
3205

3206 3207 3208 3209 3210
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}
        )
        return out
3211 3212 3213 3214


def cumsum(x, axis=None, dtype=None, name=None):
    """
3215 3216
    The cumulative sum of the elements along a given axis.

3217
    Note:
3218
        The first element of the result is the same as the first element of the input.
3219 3220

    Args:
3221
        x (Tensor): The input tensor needed to be cumsumed.
3222
        axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
3223
        dtype (str, optional): The data type of the output tensor, can be float16, float32, float64, int32, int64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
3224 3225 3226
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
3227
        Tensor, the result of cumsum operator.
3228 3229 3230

    Examples:
        .. code-block:: python
3231

3232
            import paddle
3233

3234 3235
            data = paddle.arange(12)
            data = paddle.reshape(data, (3, 4))
3236 3237 3238 3239 3240 3241 3242 3243

            y = paddle.cumsum(data)
            # [ 0  1  3  6 10 15 21 28 36 45 55 66]

            y = paddle.cumsum(data, axis=0)
            # [[ 0  1  2  3]
            #  [ 4  6  8 10]
            #  [12 15 18 21]]
3244

3245 3246 3247 3248 3249 3250 3251
            y = paddle.cumsum(data, axis=-1)
            # [[ 0  1  3  6]
            #  [ 4  9 15 22]
            #  [ 8 17 27 38]]

            y = paddle.cumsum(data, dtype='float64')
            print(y.dtype)
3252
            # paddle.float64
3253 3254 3255 3256 3257 3258
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
Z
zhiboniu 已提交
3259
        x = cast(x, dtype)
3260

H
hong 已提交
3261
    if in_dygraph_mode():
3262 3263
        if axis is None:
            axis = -1
3264
        return _C_ops.cumsum(x, axis, flatten, False, False)
3265 3266 3267 3268 3269 3270 3271 3272 3273
    else:
        check_type(x, 'x', (Variable), 'cumsum')
        locals_var = locals().copy()
        kwargs = dict()
        for name, val in locals_var.items():
            if val is not None:
                kwargs[name] = val
        _cum_sum_ = generate_layer_fn('cumsum')
        return _cum_sum_(**kwargs)
G
guofei 已提交
3274

3275 3276 3277

def logcumsumexp(x, axis=None, dtype=None, name=None):
    r"""
3278
    The logarithm of the cumulative summation of the exponentiation of the elements along a given axis.
3279 3280 3281 3282 3283 3284

    For summation index j given by `axis` and other indices i, the result is

    .. math::

        logcumsumexp(x)_{ij} = log \sum_{i=0}^{j}exp(x_{ij})
3285

3286 3287 3288 3289 3290 3291
    Note:
        The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): The input tensor.
        axis (int, optional): The dimension to do the operation along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
3292
        dtype (str, optional): The data type of the output tensor, can be float16, float32, float64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
3293 3294 3295
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
3296
        Tensor, the result of logcumsumexp operator.
3297 3298 3299

    Examples:
        .. code-block:: python
3300

3301
            import paddle
3302

3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313
            data = paddle.arange(12, dtype='float64')
            data = paddle.reshape(data, (3, 4))

            y = paddle.logcumsumexp(data)
            # [ 0.         1.3132617  2.4076061  3.4401898  4.4519143  5.4561934
            #   6.4577627  7.4583397  8.458551   9.45863   10.458658  11.458669 ]

            y = paddle.logcumsumexp(data, axis=0)
            # [[ 0.        1.        2.        3.      ]
            #  [ 4.01815   5.01815   6.01815   7.01815 ]
            #  [ 8.018479  9.018479 10.018479 11.018479]]
3314

3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331
            y = paddle.logcumsumexp(data, axis=-1)
            # [[ 0.         1.3132617  2.4076061  3.4401898]
            #  [ 4.         5.3132615  6.407606   7.44019  ]
            #  [ 8.         9.313262  10.407606  11.440189 ]]

            y = paddle.logcumsumexp(data, dtype='float64')
            print(y.dtype)
            # paddle.float64
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = cast(x, dtype)

    if in_dygraph_mode():
3332 3333
        if axis is None:
            axis = -1
3334
        return _C_ops.logcumsumexp(x, axis, flatten, False, False)
3335 3336 3337 3338
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp"
        )
3339

3340 3341 3342 3343 3344 3345 3346 3347 3348
        helper = LayerHelper('logcumsumexp', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logcumsumexp',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'axis': axis, 'flatten': flatten},
        )
        return out
3349 3350


H
hlygit66666 已提交
3351 3352 3353 3354
def cumprod(x, dim=None, dtype=None, name=None):
    """
    Compute the cumulative product of the input tensor x along a given dimension dim.

3355 3356
    Note:
        The first element of the result is the same as the first element of the input.
H
hlygit66666 已提交
3357 3358 3359

    Args:
        x (Tensor): the input tensor need to be cumproded.
Z
Zman 已提交
3360 3361 3362 3363 3364 3365 3366
        dim (int, optional): the dimension along which the input tensor will be accumulated. It need to be in the range of [-x.rank, x.rank),
                    where x.rank means the dimensions of the input tensor x and -1 means the last dimension.
        dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64, complex64,
                    complex128. If specified, the input tensor is casted to dtype before the operation is performed.
                    This is useful for preventing data type overflows. The default value is None.
        name (str, optional): Name for the operation (optional, default is None). For more information,
                    please refer to :ref:`api_guide_Name`.
H
hlygit66666 已提交
3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402

    Returns:
        Tensor, the result of cumprod operator.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.arange(12)
            data = paddle.reshape(data, (3, 4))
            # [[ 0  1  2  3 ]
            #  [ 4  5  6  7 ]
            #  [ 8  9  10 11]]

            y = paddle.cumprod(data, dim=0)
            # [[ 0  1   2   3]
            #  [ 0  5  12  21]
            #  [ 0 45 120 231]]

            y = paddle.cumprod(data, dim=-1)
            # [[ 0   0   0    0]
            #  [ 4  20 120  840]
            #  [ 8  72 720 7920]]

            y = paddle.cumprod(data, dim=1, dtype='float64')
            # [[ 0.   0.   0.    0.]
            #  [ 4.  20. 120.  840.]
            #  [ 8.  72. 720. 7920.]]

            print(y.dtype)
            # paddle.float64

    """

    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
Z
zhiboniu 已提交
3403
        x = cast(x, dtype)
H
hlygit66666 已提交
3404

3405
    if in_dygraph_mode():
3406
        return _C_ops.cumprod(x, dim)
3407 3408 3409 3410 3411 3412 3413 3414
    else:
        check_variable_and_dtype(
            x,
            "x",
            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
            'cumprod',
        )
        check_type(dim, 'dim', int, 'cumprod')
H
hlygit66666 已提交
3415

3416 3417 3418 3419 3420 3421 3422 3423 3424
        helper = LayerHelper('cumprod', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='cumprod',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': dim},
        )
        return out
H
hlygit66666 已提交
3425

3426

J
Jack Zhou 已提交
3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442
def isfinite(x, name=None):
    """

    Return whether every element of input tensor is finite number or not.

    Args:
        x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows every element of `x` whether it is finite number or not.

    Examples:
        .. code-block:: python

            import paddle
N
Noel 已提交
3443

3444
            x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
C
Chen Long 已提交
3445
            out = paddle.isfinite(x)
N
Noel 已提交
3446
            print(out)  # [False  True  True False  True False False]
J
Jack Zhou 已提交
3447
    """
H
hong 已提交
3448
    if in_dygraph_mode():
3449
        return _C_ops.isfinite(x)
3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462
    else:
        helper = LayerHelper("isfinite_v2", **locals())
        check_variable_and_dtype(
            x,
            'x',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'isfinite',
        )
        out = helper.create_variable_for_type_inference('bool')
        helper.append_op(
            type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}
        )
        return out
J
Jack Zhou 已提交
3463

3464

J
Jack Zhou 已提交
3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480
def isinf(x, name=None):
    """

    Return whether every element of input tensor is `+/-INF` or not.

    Args:
        x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows every element of `x` whether it is `+/-INF` or not.

    Examples:
        .. code-block:: python

            import paddle
C
Chen Long 已提交
3481

3482
            x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
C
Chen Long 已提交
3483
            out = paddle.isinf(x)
N
Noel 已提交
3484
            print(out)  # [ True False False  True False False False]
J
Jack Zhou 已提交
3485
    """
H
hong 已提交
3486
    if in_dygraph_mode():
3487
        return _C_ops.isinf(x)
3488 3489 3490 3491 3492 3493 3494 3495
    else:
        helper = LayerHelper("isinf_v2", **locals())
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
        return out
J
Jack Zhou 已提交
3496

3497

J
Jack Zhou 已提交
3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513
def isnan(x, name=None):
    """

    Return whether every element of input tensor is `NaN` or not.

    Args:
        x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        `Tensor`, the bool result which shows every element of `x` whether it is `NaN` or not.

    Examples:
        .. code-block:: python

            import paddle
3514

3515
            x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
C
Chen Long 已提交
3516
            out = paddle.isnan(x)
N
Noel 已提交
3517
            print(out)  # [False False False False False  True  True]
J
Jack Zhou 已提交
3518
    """
H
hong 已提交
3519
    if in_dygraph_mode():
3520
        return _C_ops.isnan(x)
3521 3522 3523 3524 3525 3526 3527 3528
    else:
        helper = LayerHelper("isnan_v2", **locals())
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
        return out
J
Jack Zhou 已提交
3529 3530


G
guofei 已提交
3531 3532 3533 3534 3535
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
    """
    Compute the product of tensor elements over the given axis.

    Args:
3536
        x (Tensor): The input tensor, its data type should be float32, float64, int32, int64.
3537 3538 3539
        axis (int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
            multiply all elements of `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
G
guofei 已提交
3540
            the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
3541
        keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
3542
            tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.
3543 3544 3545
        dtype (str|np.dtype, optional): The desired date type of returned tensor, can be float32, float64,
            int32, int64. If specified, the input tensor is casted to dtype before operator performed.
            This is very useful for avoiding data type overflows. The default value is None, the dtype
G
guofei 已提交
3546
            of output is the same as input Tensor `x`.
3547
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
G
guofei 已提交
3548 3549 3550

    Returns:
        Tensor, result of product on the specified dim of input tensor.
3551

G
guofei 已提交
3552 3553 3554 3555 3556 3557
    Examples:
        .. code-block:: python

            import paddle

            # the axis is a int element
3558 3559
            x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
                                  [0.1, 0.2, 0.6, 0.7]])
G
guofei 已提交
3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575
            out1 = paddle.prod(x)
            # [0.0002268]

            out2 = paddle.prod(x, -1)
            # [0.027  0.0084]

            out3 = paddle.prod(x, 0)
            # [0.02 0.06 0.3  0.63]

            out4 = paddle.prod(x, 0, keepdim=True)
            # [[0.02 0.06 0.3  0.63]]

            out5 = paddle.prod(x, 0, dtype='int64')
            # [0 0 0 0]

            # the axis is list
3576 3577
            y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
                                  [[5.0, 6.0], [7.0, 8.0]]])
G
guofei 已提交
3578 3579 3580 3581 3582 3583 3584 3585
            out6 = paddle.prod(y, [0, 1])
            # [105. 384.]

            out7 = paddle.prod(y, (1, 2))
            # [  24. 1680.]

    """
    if dtype is not None:
3586 3587 3588
        check_dtype(
            dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod'
        )
G
guofei 已提交
3589
        if x.dtype != convert_np_dtype_to_dtype_(dtype):
Z
zhiboniu 已提交
3590
            x = cast(x, dtype)
G
guofei 已提交
3591

3592
    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
3593
    if in_dygraph_mode():
3594
        return _C_ops.prod(x, axis, keepdim, reduce_all)
3595 3596 3597 3598 3599 3600 3601
    else:
        helper = LayerHelper('reduce_prod', **locals())
        check_variable_and_dtype(
            x,
            'x/input',
            ['float32', 'float64', 'int32', 'int64'],
            'reduce_prod',
3602
        )
3603 3604 3605 3606 3607 3608 3609 3610 3611 3612
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
        helper.append_op(
            type='reduce_prod',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
        )
        return out
W
WangXi 已提交
3613 3614 3615 3616


def sign(x, name=None):
    """
3617
    Returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
W
WangXi 已提交
3618 3619

    Args:
3620 3621
        x (Tensor): The input tensor. The data type can be float16, float32 or float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
W
WangXi 已提交
3622 3623 3624 3625 3626 3627 3628 3629 3630

    Returns:
        Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle

3631
          x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
W
WangXi 已提交
3632 3633 3634
          out = paddle.sign(x=x)
          print(out)  # [1.0, 0.0, -1.0, 1.0]
    """
H
hong 已提交
3635
    if in_dygraph_mode():
3636
        return _C_ops.sign(x)
3637 3638 3639 3640 3641 3642
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'sign'
        )
        helper = LayerHelper("sign", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
H
hong 已提交
3643

3644
        helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
W
WangXi 已提交
3645

3646
        return out
W
WangXi 已提交
3647 3648 3649


def tanh(x, name=None):
3650
    r"""
W
WangXi 已提交
3651 3652 3653
    Tanh Activation Operator.

    .. math::
3654
        out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
W
WangXi 已提交
3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668

    Args:
        x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Output of Tanh operator, a Tensor with same data type and shape as input.

    Examples:

        .. code-block:: python

            import paddle

3669
            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
W
WangXi 已提交
3670
            out = paddle.tanh(x)
N
Noel 已提交
3671
            print(out)
W
WangXi 已提交
3672 3673
            # [-0.37994896 -0.19737532  0.09966799  0.29131261]
    """
H
hong 已提交
3674
    if in_dygraph_mode():
3675
        return _C_ops.tanh(x)
3676 3677 3678 3679 3680 3681 3682 3683 3684
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'tanh'
        )
        check_type(x, 'x', (Variable), 'tanh')
        helper = LayerHelper('tanh', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
        return out
S
Steffy-zxf 已提交
3685

3686

3687
@inplace_apis_in_dygraph_only
3688 3689 3690 3691 3692
def tanh_(x, name=None):
    r"""
    Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_tanh`.
    """
3693
    return _C_ops.tanh_(x)
3694 3695


S
Steffy-zxf 已提交
3696 3697
def increment(x, value=1.0, name=None):
    """
3698
    The API is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
S
Steffy-zxf 已提交
3699 3700 3701 3702
    Notice that the number of elements in :attr:`x` must be equal to 1.

    Args:
        x (Tensor): A tensor that must always contain only one element, its data type supports float32, float64, int32 and int64.
3703
        value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
S
Steffy-zxf 已提交
3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the elementwise-incremented tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.zeros(shape=[1], dtype='float32')
            counter = paddle.increment(data)
            # [1.]

    """
H
hong 已提交
3719
    if in_dygraph_mode():
3720
        return _C_ops.increment_(x, value)
3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732
    else:
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
        )
        helper = LayerHelper("increment", **locals())
        helper.append_op(
            type='increment',
            inputs={'X': [x]},
            outputs={'Out': [x]},
            attrs={'step': float(value)},
        )
        return x
3733 3734 3735 3736


def all(x, axis=None, keepdim=False, name=None):
    """
3737
    Computes the ``logical and`` of tensor elements over the given dimension.
3738 3739 3740 3741 3742

    Args:
        x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is compute. If
            :attr:`None`, and all elements of :attr:`x` and return a
N
Noel 已提交
3743
            Tensor with a single element, otherwise must be in the
3744 3745 3746 3747 3748 3749
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
3750
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
3751 3752 3753 3754 3755 3756 3757 3758

    Returns:
        Tensor: Results the ``logical and`` on the specified axis of input Tensor `x`,  it's data type is bool.

    Examples:
        .. code-block:: python

            import paddle
C
Chen Long 已提交
3759

N
Noel 已提交
3760
            # x is a bool Tensor with following elements:
3761 3762
            #    [[True, False]
            #     [True, True]]
C
Chen Long 已提交
3763
            x = paddle.to_tensor([[1, 0], [1, 1]], dtype='int32')
3764
            print(x)
S
syyxsxx 已提交
3765
            x = paddle.cast(x, 'bool')
C
Chen Long 已提交
3766

3767 3768 3769
            # out1 should be [False]
            out1 = paddle.all(x)  # [False]
            print(out1)
C
Chen Long 已提交
3770

3771 3772 3773
            # out2 should be [True, False]
            out2 = paddle.all(x, axis=0)  # [True, False]
            print(out2)
C
Chen Long 已提交
3774 3775

            # keepdim=False, out3 should be [False, True], out.shape should be (2,)
3776 3777
            out3 = paddle.all(x, axis=-1)  # [False, True]
            print(out3)
C
Chen Long 已提交
3778 3779 3780

            # keepdim=True, out4 should be [[False], [True]], out.shape should be (2,1)
            out4 = paddle.all(x, axis=1, keepdim=True) # [[False], [True]]
3781
            print(out4)
3782

3783
    """
3784
    if in_dygraph_mode():
3785
        return _C_ops.all(x, axis, keepdim)
3786 3787 3788 3789 3790 3791 3792 3793
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        attrs = {
            'dim': axis,
            'keep_dim': keepdim,
            'reduce_all': reduce_all,
        }
        check_variable_and_dtype(x, 'x', ['bool'], 'all')
3794

3795
        check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
3796

3797 3798 3799 3800 3801 3802 3803 3804 3805
        helper = LayerHelper('all', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_all',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out
3806 3807 3808 3809


def any(x, axis=None, keepdim=False, name=None):
    """
C
Chen Long 已提交
3810
    Computes the ``logical or`` of tensor elements over the given dimension, and return the result.
3811 3812 3813 3814 3815

    Args:
        x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is compute. If
            :attr:`None`, and all elements of :attr:`x` and return a
N
Noel 已提交
3816
            Tensor with a single element, otherwise must be in the
3817 3818 3819 3820 3821 3822
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
            value is False.
3823
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
3824 3825 3826 3827 3828 3829 3830 3831

    Returns:
        Tensor: Results the ``logical or`` on the specified axis of input Tensor `x`,  it's data type is bool.

    Examples:
        .. code-block:: python

            import paddle
C
Chen Long 已提交
3832 3833 3834

            x = paddle.to_tensor([[1, 0], [1, 1]], dtype='int32')
            x = paddle.assign(x)
3835
            print(x)
S
syyxsxx 已提交
3836
            x = paddle.cast(x, 'bool')
C
Chen Long 已提交
3837 3838 3839 3840
            # x is a bool Tensor with following elements:
            #    [[True, False]
            #     [True, True]]

3841 3842 3843
            # out1 should be [True]
            out1 = paddle.any(x)  # [True]
            print(out1)
C
Chen Long 已提交
3844

3845 3846
            # out2 should be [True, True]
            out2 = paddle.any(x, axis=0)  # [True, True]
3847
            print(out2)
C
Chen Long 已提交
3848 3849

            # keepdim=False, out3 should be [True, True], out.shape should be (2,)
3850
            out3 = paddle.any(x, axis=-1)  # [True, True]
3851
            print(out3)
C
Chen Long 已提交
3852 3853 3854

            # keepdim=True, result should be [[True], [True]], out.shape should be (2,1)
            out4 = paddle.any(x, axis=1, keepdim=True)  # [[True], [True]]
3855 3856
            print(out4)

3857
    """
3858
    if in_dygraph_mode():
3859
        return _C_ops.any(x, axis, keepdim)
3860 3861 3862 3863 3864 3865 3866
    else:
        reduce_all, axis = _get_reduce_axis(axis, x)
        attrs = {
            'dim': axis,
            'keep_dim': keepdim,
            'reduce_all': reduce_all,
        }
3867

3868
        check_variable_and_dtype(x, 'x', ['bool'], 'any')
3869

3870
        check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
3871

3872 3873 3874 3875 3876 3877 3878 3879 3880
        helper = LayerHelper('any', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='reduce_any',
            inputs={'X': x},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out
L
Leo Chen 已提交
3881

3882

L
Leo Chen 已提交
3883 3884
def broadcast_shape(x_shape, y_shape):
    """
I
Infinity_lee 已提交
3885 3886 3887 3888 3889 3890
    The function returns the shape of doing operation with broadcasting on tensors of x_shape and y_shape.

    Note:
        If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
L
Leo Chen 已提交
3891 3892 3893 3894

    Args:
        x_shape (list[int]|tuple[int]): A shape of tensor.
        y_shape (list[int]|tuple[int]): A shape of tensor.
3895

L
Leo Chen 已提交
3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906

    Returns:
        list[int], the result shape.

    Examples:
        .. code-block:: python

            import paddle

            shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1])
            # [2, 3, 3]
3907

L
Leo Chen 已提交
3908 3909 3910 3911 3912 3913
            # shape = paddle.broadcast_shape([2, 1, 3], [3, 3, 1])
            # ValueError (terminated with error message).

    """

    return core.broadcast_shape(x_shape, y_shape)
3914

3915

3916 3917 3918 3919 3920
def conj(x, name=None):
    r"""
    This function computes the conjugate of the Tensor elementwisely.

    Args:
3921
        x (Tensor): The input Tensor which hold the complex numbers.
3922
            Optional data types are: complex64, complex128, float32, float64, int32 or int64.
3923
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
3924 3925

    Returns:
C
Chen Long 已提交
3926
        out (Tensor): The conjugate of input. The shape and data type is the same with input. If the elements of tensor is real type such as float32, float64, int32 or int64, the out is the same with input.
3927 3928 3929 3930 3931

    Examples:
        .. code-block:: python

          import paddle
3932

3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943
          data=paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]])
          #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
          #       [[(1+1j), (2+2j), (3+3j)],
          #        [(4+4j), (5+5j), (6+6j)]])

          conj_data=paddle.conj(data)
          #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
          #       [[(1-1j), (2-2j), (3-3j)],
          #        [(4-4j), (5-5j), (6-6j)]])

    """
H
hong 已提交
3944
    if in_dygraph_mode():
3945
        return _C_ops.conj(x)
3946 3947 3948 3949 3950 3951 3952
    else:
        check_variable_and_dtype(
            x,
            "x",
            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
            'conj',
        )
H
hong 已提交
3953

3954 3955 3956 3957
        helper = LayerHelper('conj', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
3958

3959 3960
        helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
        return out
3961

3962

Z
zyfncg 已提交
3963 3964 3965 3966 3967 3968 3969 3970 3971
def digamma(x, name=None):
    r"""
    Calculates the digamma of the given input tensor, element-wise.

    .. math::
        Out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
3972
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Z
zyfncg 已提交
3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988
    Returns:
        Tensor, the digamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.to_tensor([[1, 1.5], [0, -2.2]], dtype='float32')
            res = paddle.digamma(data)
            print(res)
            # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[-0.57721591,  0.03648996],
            #        [ nan       ,  5.32286835]])
    """

J
Jiabin Yang 已提交
3989
    if in_dygraph_mode():
3990
        return _C_ops.digamma(x)
J
Jiabin Yang 已提交
3991
    else:
3992 3993 3994 3995 3996
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
        helper = LayerHelper('digamma', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
        return out
Z
zyfncg 已提交
3997

3998

3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025
def lgamma(x, name=None):
    r"""
    Calculates the lgamma of the given input tensor, element-wise.

    This operator performs elementwise lgamma for input $X$.
    :math:`out = log\Gamma(x)`


    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.lgamma(x)
            print(out)
            # [1.31452441, 1.76149750, 2.25271273, 1.09579802]
    """
    if in_dygraph_mode():
        return _C_ops.lgamma(x)
4026 4027 4028 4029 4030 4031
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
        helper = LayerHelper('lgamma', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out})
        return out
4032 4033


4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055
def neg(x, name=None):
    """
    This function computes the negative of the Tensor elementwisely.

    Args:
        x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.neg(x)
            print(out)
            # [0.4 0.2 -0.1 -0.3]
    """

4056 4057 4058
    return scale(
        x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name
    )
4059

R
ronnywang 已提交
4060

4061
def atan2(x, y, name=None):
R
ronnywang 已提交
4062
    r"""
4063
    Element-wise arctangent of x/y with consideration of the quadrant.
R
ronnywang 已提交
4064 4065 4066 4067

    Equation:
        .. math::

4068 4069 4070 4071 4072 4073 4074 4075
            atan2(x,y)=\left\{\begin{matrix}
            & tan^{-1}(\frac{x}{y}) & y > 0 \\
            & tan^{-1}(\frac{x}{y}) + \pi & x>=0, y < 0 \\
            & tan^{-1}(\frac{x}{y}) - \pi & x<0, y < 0 \\
            & +\frac{\pi}{2} & x>0, y = 0 \\
            & -\frac{\pi}{2} & x<0, y = 0 \\
            &\text{undefined} & x=0, y = 0
            \end{matrix}\right.
R
ronnywang 已提交
4076 4077

    Args:
4078 4079
        x (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.
        y (Tensor): An N-D Tensor, must have the same type as `x`.
R
ronnywang 已提交
4080 4081 4082 4083 4084 4085 4086 4087
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float64 when the input data type is int).

    Examples:
        .. code-block:: python

4088
            import paddle
R
ronnywang 已提交
4089

4090 4091 4092
            x = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')
            #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [-1,  1,  1, -1])
R
ronnywang 已提交
4093

4094 4095 4096
            y = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')
            #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [-1,  -1,  1, 1])
R
ronnywang 已提交
4097

4098 4099 4100
            out = paddle.atan2(x, y)
            #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [-2.35619450,  2.35619450,  0.78539819, -0.78539819])
R
ronnywang 已提交
4101 4102 4103

    """

J
Jiabin Yang 已提交
4104
    if in_dygraph_mode():
4105
        return _C_ops.atan2(x, y)
R
ronnywang 已提交
4106
    else:
4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118
        check_variable_and_dtype(
            x,
            'x',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'atan2',
        )
        check_variable_and_dtype(
            y,
            'y',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'atan2',
        )
R
ronnywang 已提交
4119

4120 4121 4122 4123 4124
        helper = LayerHelper('atan2', **locals())
        inputs = {'X1': x, 'X2': y}
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
        return out
A
andyjpaddle 已提交
4125

4126

W
wangzhen38 已提交
4127 4128 4129 4130 4131
def logit(x, eps=None, name=None):
    r"""
    This function generates a new tensor with the logit of the elements of input x. x is clamped to [eps, 1-eps] when eps is not zero. When eps is zero and x < 0 or x > 1, the function will yields NaN.

    .. math::
4132

W
wangzhen38 已提交
4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163
        logit(x) = ln(\frac{x}{1 - x})

    where

    .. math::

        x_i=
            \left\{\begin{array}{rcl}
                x_i & &\text{if } eps == Default \\
                eps & &\text{if } x_i < eps \\
                x_i & &\text{if } eps <= x_i <= 1-eps \\
                1-eps & &\text{if } x_i > 1-eps
            \end{array}\right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        eps (float, optional):  the epsilon for input clamp bound. Default is None.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out(Tensor): A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([0.2635, 0.0106, 0.2780, 0.2097, 0.8095])
            out1 = paddle.logit(x)
            print(out1)
4164
            # [-1.0277, -4.5365, -0.9544, -1.3269,  1.4468]
W
wangzhen38 已提交
4165 4166

    """
4167
    if eps is None:
W
wangzhen38 已提交
4168
        eps = 0.0
4169
    if in_dygraph_mode():
4170
        return _C_ops.logit(x, eps)
4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'logit'
        )
        helper = LayerHelper("logit", **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='logit',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'eps': eps},
        )
        return out
W
wangzhen38 已提交
4184

4185

4186 4187 4188 4189 4190 4191 4192 4193 4194 4195
def lerp(x, y, weight, name=None):
    r"""
    Does a linear interpolation between x and y based on weight.

    Equation:
        .. math::

            lerp(x, y, weight) = x + weight * (y - x).

    Args:
4196 4197 4198
        x (Tensor): An N-D Tensor with starting points, the data type is float32, float64.
        y (Tensor): An N-D Tensor with ending points, the data type is float32, float64.
        weight (float|Tensor): The weight for the interpolation formula. When weight is Tensor, the data type is float32, float64.
4199 4200 4201 4202 4203 4204 4205 4206 4207
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input.

    Example:
        .. code-block:: python

            import paddle
4208

4209 4210 4211
            x = paddle.arange(1., 5., dtype='float32')
            y = paddle.empty([4], dtype='float32')
            y.fill_(10.)
4212
            out = paddle.lerp(x, y, 0.5)
4213
            # out: [5.5, 6., 6.5, 7.]
4214 4215

    """
H
hong 已提交
4216 4217 4218 4219
    if in_dygraph_mode():
        if isinstance(weight, float):
            weight = paddle.to_tensor(weight, dtype=x.dtype)

4220
        return _C_ops.lerp(x, y, weight)
4221
    else:
4222
        if isinstance(weight, float):
4223
            weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
4224

4225 4226 4227 4228 4229
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
        check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
        check_variable_and_dtype(
            weight, 'weight', ['float32', 'float64'], 'lerp'
        )
4230

4231 4232 4233 4234 4235
        helper = LayerHelper('lerp', **locals())
        inputs = {'X': x, 'Y': y, 'Weight': weight}
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
        return out
4236

4237

4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250
@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
    r"""
    Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_lerp`.
    """
    out_shape = broadcast_shape(x.shape, y.shape)
    check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
    if isinstance(weight, float):
        weight = paddle.to_tensor([weight], dtype=x.dtype)
    elif isinstance(weight, (paddle.Tensor, Variable)):
        out_shape = broadcast_shape(out_shape, weight.shape)
    if out_shape != x.shape:
4251
        raise ValueError(
4252 4253 4254 4255
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
4256
    return _C_ops.lerp_(x, y, weight)
4257

4258

W
wuhuanzhou 已提交
4259 4260
def erfinv(x, name=None):
    r"""
4261
    The inverse error function of x. Please refer to :ref:`api_paddle_erf`
W
wuhuanzhou 已提交
4262 4263 4264 4265 4266 4267 4268 4269 4270 4271

        .. math::

            erfinv(erf(x)) = x.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
4272
        out (Tensor), an N-D Tensor, the shape and data type is the same with input.
W
wuhuanzhou 已提交
4273 4274 4275 4276 4277

    Example:
        .. code-block:: python

            import paddle
4278

W
wuhuanzhou 已提交
4279 4280 4281 4282 4283
            x = paddle.to_tensor([0, 0.5, -1.], dtype="float32")
            out = paddle.erfinv(x)
            # out: [0, 0.4769, -inf]

    """
H
hong 已提交
4284
    if in_dygraph_mode():
4285
        return _C_ops.erfinv(x)
4286 4287 4288 4289 4290 4291
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
        helper = LayerHelper('erfinv', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
        return out
W
wuhuanzhou 已提交
4292

4293

W
wuhuanzhou 已提交
4294 4295 4296 4297 4298 4299 4300
@inplace_apis_in_dygraph_only
def erfinv_(x, name=None):
    r"""
    Inplace version of ``erfinv`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_erfinv`.
    """
    check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
4301
    return _C_ops.erfinv_(x)
W
wuhuanzhou 已提交
4302

4303

4304
def rad2deg(x, name=None):
4305
    r"""
4306
    Convert each of the elements of input x from angles in radians to degrees.
4307

4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323
    Equation:
        .. math::

            rad2deg(x)=180/ \pi * x

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).

    Examples:
        .. code-block:: python

            import paddle
4324
            import math
4325

4326 4327 4328 4329 4330 4331 4332
            x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
            result1 = paddle.rad2deg(x1)
            print(result1)
            # Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [180.02334595, -180.02334595,  359.98937988, -359.98937988,
            #           9.95437622 , -89.95437622])

4333
            x2 = paddle.to_tensor(math.pi/2)
4334 4335 4336 4337
            result2 = paddle.rad2deg(x2)
            print(result2)
            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [90.])
4338

4339 4340 4341 4342 4343 4344 4345
            x3 = paddle.to_tensor(1)
            result3 = paddle.rad2deg(x3)
            print(result3)
            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [57.29578018])
    """
    rad2deg_scale = 180 / np.pi
4346 4347 4348
    if in_dygraph_mode():
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            x = cast(x, dtype="float32")
4349
        return _C_ops.scale(x, rad2deg_scale, 0.0, True)
4350
    else:
4351 4352 4353
        check_variable_and_dtype(
            x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg'
        )
4354 4355 4356
        helper = LayerHelper('rad2deg', **locals())
        out_cast = x
        if convert_dtype(x.dtype) in ['int32', 'int64']:
4357
            out_cast = helper.create_variable_for_type_inference(
4358 4359 4360 4361 4362 4363 4364 4365
                dtype=paddle.float32
            )
            helper.append_op(
                type='cast',
                inputs={'X': x},
                outputs={'Out': out_cast},
                attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32},
            )
4366
        out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
4367 4368 4369 4370 4371 4372
        helper.append_op(
            type='scale',
            inputs={'X': out_cast},
            outputs={'Out': out},
            attrs={'scale': rad2deg_scale},
        )
4373 4374
        return out

4375

4376
def deg2rad(x, name=None):
4377
    r"""
4378
    Convert each of the elements of input x from degrees to angles in radians.
4379

4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394
        .. math::

            deg2rad(x)=\pi * x / 180

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).

    Examples:
        .. code-block:: python

            import paddle
4395

4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409
            x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0])
            result1 = paddle.deg2rad(x1)
            print(result1)
            # Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [3.14159274, -3.14159274,  6.28318548, -6.28318548,  1.57079637,
            #           -1.57079637])

            x2 = paddle.to_tensor(180)
            result2 = paddle.deg2rad(x2)
            print(result2)
            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #         [3.14159274])
    """
    deg2rad_scale = np.pi / 180.0
4410 4411 4412
    if in_dygraph_mode():
        if convert_dtype(x.dtype) in ['int32', 'int64']:
            x = cast(x, dtype="float32")
4413
        return _C_ops.scale(x, deg2rad_scale, 0.0, True)
4414
    else:
4415 4416 4417
        check_variable_and_dtype(
            x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad'
        )
4418 4419 4420
        helper = LayerHelper('deg2rad', **locals())
        out_cast = x
        if convert_dtype(x.dtype) in ['int32', 'int64']:
4421
            out_cast = helper.create_variable_for_type_inference(
4422 4423 4424 4425 4426 4427 4428 4429
                dtype=paddle.float32
            )
            helper.append_op(
                type='cast',
                inputs={'X': x},
                outputs={'Out': out_cast},
                attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32},
            )
4430
        out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
4431 4432 4433 4434 4435 4436
        helper.append_op(
            type='scale',
            inputs={'X': out_cast},
            outputs={'Out': out},
            attrs={'scale': deg2rad_scale},
        )
4437
        return out
A
andyjpaddle 已提交
4438

4439

T
Tao Luo 已提交
4440 4441 4442 4443
def gcd(x, y, name=None):
    """
    Computes the element-wise greatest common divisor (GCD) of input |x| and |y|.
    Both x and y must have integer types.
4444

T
Tao Luo 已提交
4445 4446 4447
    Note:
        gcd(0,0)=0, gcd(0, y)=|y|

T
Tao Luo 已提交
4448 4449
        If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).

T
Tao Luo 已提交
4450
    Args:
4451 4452
        x (Tensor): An N-D Tensor, the data type is int32,int64.
        y (Tensor): An N-D Tensor, the data type is int32,int64.
T
Tao Luo 已提交
4453 4454 4455 4456 4457 4458 4459 4460 4461
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle
4462

T
Tao Luo 已提交
4463 4464 4465 4466 4467 4468
            x1 = paddle.to_tensor(12)
            x2 = paddle.to_tensor(20)
            paddle.gcd(x1, x2)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [4])

T
Tao Luo 已提交
4469
            x3 = paddle.arange(6)
T
Tao Luo 已提交
4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481
            paddle.gcd(x3, x2)
            # Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [20, 1 , 2 , 1 , 4 , 5])

            x4 = paddle.to_tensor(0)
            paddle.gcd(x4, x2)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [20])

            paddle.gcd(x4, x4)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [0])
4482

T
Tao Luo 已提交
4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500
            x5 = paddle.to_tensor(-20)
            paddle.gcd(x1, x5)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [4])
    """
    shape = paddle.broadcast_shape(x.shape, y.shape)
    x = paddle.broadcast_to(x, shape)
    y = paddle.broadcast_to(y, shape)
    x = paddle.abs(x)
    y = paddle.abs(y)

    def _gcd_cond_fn(x, y):
        return paddle.any(y != 0)

    def _gcd_body_fn(x, y):
        # paddle.mod will raise an error when any element of y is 0. To avoid
        # that, we change those zeros to ones. Their values don't matter because
        # they won't be used.
4501
        y_not_equal_0 = y != 0
T
Tao Luo 已提交
4502
        y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype))
4503 4504 4505 4506 4507 4508 4509 4510
        x, y = (
            paddle.where(y_not_equal_0, y, x),
            paddle.where(
                y_not_equal_0,
                paddle.mod(x, y_safe),
                paddle.zeros(y.shape, y.dtype),
            ),
        )
T
Tao Luo 已提交
4511 4512
        return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

4513
    if in_dygraph_mode():
T
Tao Luo 已提交
4514 4515 4516 4517 4518
        while _gcd_cond_fn(x, y):
            x, y = _gcd_body_fn(x, y)

        return x
    else:
T
Tao Luo 已提交
4519 4520
        check_variable_and_dtype(x, 'x', ['int32', 'int64'], 'gcd')
        check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'gcd')
T
Tao Luo 已提交
4521 4522 4523
        out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y])
        return out

4524

T
Tao Luo 已提交
4525 4526 4527 4528
def lcm(x, y, name=None):
    """
    Computes the element-wise least common multiple (LCM) of input |x| and |y|.
    Both x and y must have integer types.
4529

T
Tao Luo 已提交
4530 4531 4532
    Note:
        lcm(0,0)=0, lcm(0, y)=0

T
Tao Luo 已提交
4533 4534
        If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).

T
Tao Luo 已提交
4535
    Args:
4536 4537
        x (Tensor): An N-D Tensor, the data type is int32,int64.
        y (Tensor): An N-D Tensor, the data type is int32,int64.
T
Tao Luo 已提交
4538 4539 4540 4541 4542 4543 4544 4545 4546
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor, the data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle
4547

T
Tao Luo 已提交
4548 4549 4550 4551 4552 4553
            x1 = paddle.to_tensor(12)
            x2 = paddle.to_tensor(20)
            paddle.lcm(x1, x2)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [60])

T
Tao Luo 已提交
4554
            x3 = paddle.arange(6)
T
Tao Luo 已提交
4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566
            paddle.lcm(x3, x2)
            # Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [0, 20, 20, 60, 20, 20])

            x4 = paddle.to_tensor(0)
            paddle.lcm(x4, x2)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [0])

            paddle.lcm(x4, x4)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [0])
4567

T
Tao Luo 已提交
4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578
            x5 = paddle.to_tensor(-20)
            paddle.lcm(x1, x5)
            # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [60])
    """
    d = paddle.gcd(x, y)
    # paddle.mod will raise an error when any element of y is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    d_equal_0 = paddle.equal(d, 0)
    d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d)
4579 4580 4581
    out = paddle.where(
        d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe
    )
T
Tao Luo 已提交
4582 4583
    return out

4584

A
andyjpaddle 已提交
4585 4586 4587
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
    r"""
    Computes the n-th forward difference along the given axis.
4588
    The first-order differences is computed by using the following formula:
A
andyjpaddle 已提交
4589 4590 4591 4592

    .. math::

        out[i] = x[i+1] - x[i]
4593 4594

    Higher-order differences are computed by using paddle.diff() recursively.
A
andyjpaddle 已提交
4595 4596 4597
    Only n=1 is currently supported.

    Args:
4598
        x (Tensor): The input tensor to compute the forward difference on
4599
        n (int, optional): The number of times to recursively compute the difference.
A
andyjpaddle 已提交
4600
                          Only support n=1. Default:1
4601 4602
        axis (int, optional): The axis to compute the difference along. Default:-1
        prepend (Tensor, optional): The tensor to prepend to input along axis before computing the difference.
4603
                                   It's dimensions must be equivalent to that of x,
A
andyjpaddle 已提交
4604
                                   and its shapes must match x's shape except on axis.
4605 4606
        append (Tensor, optional): The tensor to append to input along axis before computing the difference,
                                   It's dimensions must be equivalent to that of x,
A
andyjpaddle 已提交
4607
                                   and its shapes must match x's shape except on axis.
4608
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
4609

A
andyjpaddle 已提交
4610 4611 4612 4613 4614 4615 4616
    Returns:
        Tensor: The output tensor with same dtype with x.

    Examples:
        .. code-block:: python

            import paddle
4617

A
andyjpaddle 已提交
4618 4619 4620 4621 4622 4623 4624 4625 4626
            x = paddle.to_tensor([1, 4, 5, 2])
            out = paddle.diff(x)
            print(out)
            # out:
            # [3, 1, -3]

            y = paddle.to_tensor([7, 9])
            out = paddle.diff(x, append=y)
            print(out)
4627
            # out:
A
andyjpaddle 已提交
4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649
            # [3, 1, -3, 5, 2]

            z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
            out = paddle.diff(z, axis=0)
            print(out)
            # out:
            # [[3, 3, 3]]
            out = paddle.diff(z, axis=1)
            print(out)
            # out:
            # [[1, 1], [1, 1]]
    """

    if axis < 0:
        axis = axis + len(x.shape)
    if axis > len(x.shape):
        axis = len(x.shape)
    if axis < 0:
        axis = 0
    dtype = x.dtype
    axes = [axis]
    infer_flags = list(1 for i in range(len(axes)))
4650
    if in_dygraph_mode():
A
andyjpaddle 已提交
4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662
        has_pend = False
        input_list = []
        if prepend is not None and append is not None:
            input_list = [prepend, x, append]
            has_pend = True
        elif prepend is not None:
            input_list = [prepend, x]
            has_pend = True
        elif append is not None:
            input_list = [x, append]
            has_pend = True
        if has_pend:
4663
            new_input = _C_ops.concat(input_list, axis)
A
andyjpaddle 已提交
4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675
        else:
            new_input = x

        attrs_1 = ()
        attrs_2 = ()

        dim_len = new_input.shape[axis]

        starts_1 = [0]
        attrs_1 += ('starts', starts_1)
        ends_1 = [dim_len - 1]
        attrs_1 += ('ends', ends_1)
4676 4677 4678
        input_front = _C_ops.slice(
            new_input, axes, starts_1, ends_1, infer_flags, []
        )
A
andyjpaddle 已提交
4679 4680 4681 4682
        starts_2 = [1]
        attrs_2 += ('starts', starts_2)
        ends_2 = [dim_len]
        attrs_2 += ('ends', ends_2)
4683 4684 4685
        input_back = _C_ops.slice(
            new_input, axes, starts_2, ends_2, infer_flags, []
        )
4686 4687

        if x.dtype == paddle.bool:
4688
            return _C_ops.logical_xor(input_back, input_front)
4689
        else:
4690
            return _C_ops.subtract(input_back, input_front)
A
andyjpaddle 已提交
4691
    else:
4692
        check_variable_and_dtype(
4693 4694
            x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff'
        )
A
andyjpaddle 已提交
4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710
        check_type(axis, 'axis', (int), 'diff')
        helper = LayerHelper('diff', **locals())
        has_pend = False
        input_list = []
        if prepend is not None and append is not None:
            input_list = [prepend, x, append]
            has_pend = True
        elif prepend is not None:
            input_list = [prepend, x]
            has_pend = True
        elif append is not None:
            input_list = [x, append]
            has_pend = True

        if has_pend:
            new_input = helper.create_variable_for_type_inference(dtype)
4711 4712 4713 4714 4715 4716
            helper.append_op(
                type='concat',
                inputs={'X': input_list},
                outputs={'Out': [new_input]},
                attrs={'axis': axis},
            )
A
andyjpaddle 已提交
4717 4718 4719 4720 4721 4722 4723 4724 4725 4726
        else:
            new_input = x

        dim_len = new_input.shape[axis]
        attrs_1 = {'axes': axes}
        starts_1 = [0]
        ends_1 = [dim_len - 1]
        attrs_1['starts'] = starts_1
        attrs_1['ends'] = ends_1
        input_front = helper.create_variable_for_type_inference(dtype)
4727 4728 4729 4730 4731 4732
        helper.append_op(
            type='slice',
            inputs={'Input': new_input},
            attrs=attrs_1,
            outputs={'Out': input_front},
        )
A
andyjpaddle 已提交
4733 4734 4735 4736 4737 4738
        attrs_2 = {'axes': axes}
        starts_2 = [1]
        ends_2 = [dim_len]
        attrs_2['starts'] = starts_2
        attrs_2['ends'] = ends_2
        input_back = helper.create_variable_for_type_inference(dtype)
4739 4740 4741 4742 4743 4744
        helper.append_op(
            type='slice',
            inputs={'Input': new_input},
            attrs=attrs_2,
            outputs={'Out': input_back},
        )
A
andyjpaddle 已提交
4745 4746 4747

        if dtype == paddle.bool:
            out = helper.create_variable_for_type_inference(dtype)
4748 4749 4750 4751 4752
            helper.append_op(
                type='logical_xor',
                inputs={"X": input_back, "Y": input_front},
                outputs={"Out": out},
            )
A
andyjpaddle 已提交
4753
        else:
4754 4755 4756
            out = paddle.tensor.math._subtract_with_axis(
                input_back, input_front, axis=axis
            )
A
andyjpaddle 已提交
4757 4758

        return out
F
Feiyu Chan 已提交
4759

4760

F
Feiyu Chan 已提交
4761 4762
def angle(x, name=None):
    r"""
4763
    Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
F
Feiyu Chan 已提交
4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775
    for negative real numbers, the angle is :math:`\pi`.

    Equation:
        .. math::

            angle(x)=arctan2(x.imag, x.real)

    Args:
        x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 .
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
4776
        Tensor: An N-D Tensor of real data type with the same precision as that of x's data type.
F
Feiyu Chan 已提交
4777 4778 4779 4780 4781 4782 4783 4784 4785

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
            y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
            z = x + 1j * y
4786 4787 4788 4789 4790 4791
            print(z)
            # Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True,
            #        [[(-2-2j), (-2-1j), (-2+0j), (-2+1j)],
            #         [(-1-2j), (-1-1j), (-1+0j), (-1+1j)],
            #         [-2j    , -1j    ,  0j    ,  1j    ],
            #         [ (1-2j),  (1-1j),  (1+0j),  (1+1j)]])
F
Feiyu Chan 已提交
4792 4793

            theta = paddle.angle(z)
4794 4795 4796 4797 4798 4799
            print(theta)
            # Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [[-2.35619450, -2.67794514,  3.14159274,  2.67794514],
            #         [-2.03444386, -2.35619450,  3.14159274,  2.35619450],
            #         [-1.57079637, -1.57079637,  0.        ,  1.57079637],
            #         [-1.10714877, -0.78539819,  0.        ,  0.78539819]])
F
Feiyu Chan 已提交
4800 4801
    """

W
WangZhen 已提交
4802
    if in_dygraph_mode():
F
Feiyu Chan 已提交
4803
        return _C_ops.angle(x)
4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816
    else:
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle'
        )
        op_type = "angle"
        helper = LayerHelper(op_type, **locals())
        inputs = {"X": x}
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(x.dtype)
        )
        outputs = {"Out": out}
        helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
        return out
4817

4818

4819
def heaviside(x, y, name=None):
4820
    r"""
4821 4822 4823 4824 4825
    Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is

    .. math::
        heaviside(x, y)=
            \left\{
4826 4827 4828 4829
                \begin{array}{lcl}
                0,& &\text{if} \ x < 0, \\
                y,& &\text{if} \ x = 0, \\
                1,& &\text{if} \ x > 0.
4830
                \end{array}
4831
            \right.
4832

4833
    Note:
I
Infinity_lee 已提交
4834 4835 4836
        ``paddle.heaviside`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
4837 4838

    Args:
4839 4840
        x (Tensor): The input tensor of Heaviside step function, it's data type should be float16, float32, float64, int32 or int64.
        y (Tensor): The tensor that determines a Heaviside step function, it's data type should be float16, float32, float64, int32 or int64.
4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858
        name (str, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. If x and y have different shapes and are broadcastable, the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([-0.5, 0, 0.5])
            y = paddle.to_tensor([0.1])
            paddle.heaviside(x, y)
            #    [0.        , 0.10000000, 1.        ]
            x = paddle.to_tensor([[-0.5, 0, 0.5], [-0.5, 0.5, 0]])
            y = paddle.to_tensor([0.1, 0.2, 0.3])
            paddle.heaviside(x, y)
            #    [[0.        , 0.20000000, 1.        ],
            #     [0.        , 1.        , 0.30000001]]
4859
    """
4860 4861 4862
    op_type = 'elementwise_heaviside'
    axis = -1
    act = None
4863
    if in_dygraph_mode():
4864 4865 4866
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type
        )
4867 4868
    else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
4869

4870

4871 4872 4873 4874 4875 4876
def frac(x, name=None):
    """
    This API is used to return the fractional portion of each element in input.

    Args:
        x (Tensor): The input tensor, which data type should be int32, int64, float32, float64.
4877
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
4878 4879 4880 4881 4882

    Returns:
        Tensor: The output Tensor of frac.

    Examples:
4883
        .. code-block:: python
4884 4885 4886

            import paddle

4887 4888
            input = paddle.to_tensor([[12.22000003, -1.02999997],
                                    [-0.54999995, 0.66000003]])
4889
            output = paddle.frac(input)
4890 4891 4892 4893
            print(output)
            # Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            #        [[ 0.22000003, -0.02999997],
            #         [-0.54999995,  0.66000003]])
4894 4895 4896 4897
    """
    op_type = 'elementwise_sub'
    axis = -1
    act = None
4898
    if x.dtype not in [
4899 4900 4901 4902
        paddle.int32,
        paddle.int64,
        paddle.float32,
        paddle.float64,
4903
    ]:
4904
        raise TypeError(
4905 4906 4907 4908
            "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format(
                x.dtype
            )
        )
4909
    if in_dygraph_mode():
4910 4911
        y = _C_ops.trunc(x)
        return _C_ops.subtract(x, y)
4912
    else:
4913 4914
        inputs = {"X": x}
        attrs = {}
4915

4916 4917 4918 4919 4920 4921 4922 4923 4924
        helper = LayerHelper("trunc", **locals())
        check_variable_and_dtype(
            x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc'
        )
        y = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
        )
        return _elementwise_op(LayerHelper(op_type, **locals()))
4925

4926

4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951
def sgn(x, name=None):
    """
    For complex tensor, this API returns a new tensor whose elements have the same angles as the corresponding
    elements of input and absolute values of one.
    For other float dtype tensor,
    this API returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero, same as paddle.sign.

    Args:
        x (Tensor): The input tensor, which data type should be float16, float32, float64, complex64, complex128.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A sign Tensor for real input, or normalized Tensor for complex input, shape and data type are same as input.

    Examples:
        .. code-block:: Python

            import paddle

            x = paddle.to_tensor([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]])
            print(paddle.sgn(x))
            #[[0.6+0.8j       0.28-0.96j      0.+0.j      0.4472136+0.8944272j]
            # [0.6+0.8j       1.+0.j          0.+0.j      -1.+0.j]]

    """
4952
    if x.dtype not in [
4953 4954 4955 4956 4957
        paddle.float16,
        paddle.float32,
        paddle.float64,
        paddle.complex64,
        paddle.complex128,
4958
    ]:
4959
        raise TypeError(
4960 4961 4962 4963
            "The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}".format(
                x.dtype
            )
        )
4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974
    if paddle.is_complex(x):
        expand_x = paddle.as_real(x)
        x_abs = paddle.abs(x)
        x_abs = paddle.unsqueeze(x_abs, axis=-1)
        output = expand_x / x_abs
        zeros = paddle.zeros_like(output)
        output = paddle.where(paddle.isnan(output), zeros, output)

        return paddle.as_complex(output)
    else:
        return paddle.sign(x)
4975

4976

4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043
def take(x, index, mode='raise', name=None):
    """
    Returns a new tensor with the elements of input tensor x at the given index.
    The input tensor is treated as if it were viewed as a 1-D tensor.
    The result takes the same shape as the index.

    Args:
        x (Tensor): An N-D Tensor, its data type should be int32, int64, float32, float64.
        index (Tensor): An N-D Tensor, its data type should be int32, int64.
        mode (str, optional): Specifies how out-of-bounds index will behave. the candicates are ``'raise'``, ``'wrap'`` and ``'clip'``.

            - ``'raise'``: raise an error (default);
            - ``'wrap'``: wrap around;
            - ``'clip'``: clip to the range. ``'clip'`` mode means that all indices that are too large are replaced by the index that addresses the last element. Note that this disables indexing with negative numbers.

        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, Tensor with the same shape as index, the data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle

            x_int = paddle.arange(0, 12).reshape([3, 4])
            x_float = x_int.astype(paddle.float64)

            idx_pos = paddle.arange(4, 10).reshape([2, 3])  # positive index
            idx_neg = paddle.arange(-2, 4).reshape([2, 3])  # negative index
            idx_err = paddle.arange(-2, 13).reshape([3, 5])  # index out of range

            paddle.take(x_int, idx_pos)
            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[4, 5, 6],
            #         [7, 8, 9]])

            paddle.take(x_int, idx_neg)
            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[10, 11, 0 ],
            #         [1 , 2 , 3 ]])

            paddle.take(x_float, idx_pos)
            # Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
            #        [[4., 5., 6.],
            #         [7., 8., 9.]])

            x_int.take(idx_pos)
            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            #        [[4, 5, 6],
            #         [7, 8, 9]])

            paddle.take(x_int, idx_err, mode='wrap')
            # Tensor(shape=[3, 5], dtype=int32, place=Place(cpu), stop_gradient=True,
            #        [[10, 11, 0 , 1 , 2 ],
            #         [3 , 4 , 5 , 6 , 7 ],
            #         [8 , 9 , 10, 11, 0 ]])

            paddle.take(x_int, idx_err, mode='clip')
            # Tensor(shape=[3, 5], dtype=int32, place=Place(cpu), stop_gradient=True,
            #        [[0 , 0 , 0 , 1 , 2 ],
            #         [3 , 4 , 5 , 6 , 7 ],
            #         [8 , 9 , 10, 11, 11]])

    """
    if mode not in ['raise', 'wrap', 'clip']:
        raise ValueError(
5044 5045 5046 5047
            "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format(
                mode
            )
        )
5048

5049
    if in_dygraph_mode():
5050 5051
        if not isinstance(index, (paddle.Tensor, Variable)):
            raise TypeError(
5052
                "The type of 'index' must be Tensor, but got {}".format(
5053 5054 5055
                    type(index)
                )
            )
5056 5057
        if index.dtype not in [paddle.int32, paddle.int64]:
            raise TypeError(
5058 5059 5060 5061
                "The data type of 'index' must be one of ['int32', 'int64'], but got {}".format(
                    index.dtype
                )
            )
5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074

    else:
        check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take')

    input_1d = x.flatten()
    index_1d = index.flatten()
    max_index = input_1d.shape[-1]

    if mode == 'raise':
        # This processing enables 'take' to handle negative indexes within the correct range.
        index_1d = paddle.where(index_1d < 0, index_1d + max_index, index_1d)
    elif mode == 'wrap':
        # The out of range indices are constrained by taking the remainder.
5075
        index_1d = paddle.where(index_1d < 0, index_1d % max_index, index_1d)
5076 5077 5078
        index_1d = paddle.where(
            index_1d >= max_index, index_1d % max_index, index_1d
        )
5079 5080 5081 5082 5083 5084 5085
    elif mode == 'clip':
        # 'clip' mode disables indexing with negative numbers.
        index_1d = clip(index_1d, 0, max_index - 1)

    out = input_1d.index_select(index_1d).reshape(index.shape)

    return out
5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111


def frexp(x, name=None):
    """
    The function used to decompose a floating point number into mantissa and exponent.

    Args:
        x (Tensor): The input tensor, it's data type should be float32, float64.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
    Returns:

        - mantissa (Tensor), A mantissa Tensor. The shape and data type of mantissa tensor and exponential tensor are
            the same as those of input.

        - exponent (Tensor), A exponent Tensor. The shape and data type of mantissa tensor and exponential tensor are
            the same as those of input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2, 3, 4]], dtype="float32")
            print(paddle.tensor.math.frexp(x))
            # (Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,[[0.50000000, 0.50000000, 0.75000000, 0.50000000]]),
            #  Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,[[1., 2., 2., 3.]]))
5112
    """
5113 5114
    if x.dtype not in [paddle.float32, paddle.float64]:
        raise TypeError(
5115 5116 5117 5118
            "The data type of input must be one of ['float32', 'float64'], but got {}".format(
                x.dtype
            )
        )
5119 5120
    input_x = paddle.abs(x)
    exponent = paddle.floor(paddle.log2(input_x))
5121 5122 5123
    exponent = paddle.where(
        paddle.isinf(exponent), paddle.full_like(exponent, 0), exponent
    )
5124 5125 5126 5127

    # 0填充
    mantissa = paddle.divide(input_x, 2**exponent)
    # 计算exponent
5128 5129 5130 5131 5132 5133 5134 5135 5136 5137
    exponent = paddle.where(
        (mantissa >= 1),
        paddle.add(exponent, paddle.ones_like(exponent)),
        exponent,
    )
    mantissa = paddle.where(
        (mantissa >= 1),
        paddle.divide(mantissa, 2 ** paddle.ones_like(exponent)),
        mantissa,
    )
5138 5139 5140

    mantissa = paddle.where((x < 0), mantissa * -1, mantissa)
    return mantissa, exponent