# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
math functions
"""
from __future__ import print_function

from paddle.common_ops_import import *
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.layers.layer_function_generator import _generate_doc_string_
import sys

# TODO: define math functions
# yapf: disable
from ..fluid.layers import abs    #DEFINE_ALIAS
from ..fluid.layers import acos    #DEFINE_ALIAS
from ..fluid.layers import asin    #DEFINE_ALIAS
from ..fluid.layers import ceil    #DEFINE_ALIAS
from ..fluid.layers import cos    #DEFINE_ALIAS
from ..fluid.layers import sinh    #DEFINE_ALIAS
from ..fluid.layers import cosh    #DEFINE_ALIAS
from ..fluid.layers import cumsum    #DEFINE_ALIAS
from ..fluid.layers import elementwise_add    #DEFINE_ALIAS
from ..fluid.layers import elementwise_div    #DEFINE_ALIAS
from ..fluid.layers import elementwise_floordiv    #DEFINE_ALIAS
from ..fluid.layers import elementwise_max    #DEFINE_ALIAS
from ..fluid.layers import elementwise_min    #DEFINE_ALIAS
from ..fluid.layers import elementwise_mod    #DEFINE_ALIAS
from ..fluid.layers import elementwise_mul    #DEFINE_ALIAS
from ..fluid.layers import elementwise_pow    #DEFINE_ALIAS
from ..fluid.layers import elementwise_sub    #DEFINE_ALIAS
from ..fluid.layers import exp    #DEFINE_ALIAS
from ..fluid.layers import floor    #DEFINE_ALIAS
from ..fluid.layers import log    #DEFINE_ALIAS
from ..fluid.layers import reciprocal    #DEFINE_ALIAS
from ..fluid.layers import reduce_max    #DEFINE_ALIAS
from ..fluid.layers import reduce_min    #DEFINE_ALIAS
from ..fluid.layers import reduce_prod    #DEFINE_ALIAS
from ..fluid.layers import reduce_sum    #DEFINE_ALIAS
from ..fluid.layers import round    #DEFINE_ALIAS
from ..fluid.layers import rsqrt    #DEFINE_ALIAS
from ..fluid.layers import scale    #DEFINE_ALIAS
from ..fluid.layers import sign    #DEFINE_ALIAS
from ..fluid.layers import square    #DEFINE_ALIAS
from ..fluid.layers import stanh    #DEFINE_ALIAS
from ..fluid.layers import atan    #DEFINE_ALIAS
from ..fluid.layers import erf    #DEFINE_ALIAS

from ..fluid.layers import increment    #DEFINE_ALIAS
from ..fluid.layers import multiplex    #DEFINE_ALIAS
from ..fluid.layers import sums    #DEFINE_ALIAS

__all__ = [
        'abs',
        'acos',
        'asin',
        'atan',
        'ceil',
        'cos',
        'cosh',
        'cumsum',
        'elementwise_add',
        'elementwise_div',
        'elementwise_floordiv',
        'elementwise_max',
        'elementwise_min',
        'elementwise_mod',
        'elementwise_pow',
        'elementwise_sub',
        'exp',
        'floor',
        'increment',
        'log',
        'mul',
        'multiplex',
        'pow',
        'reciprocal',
        'reduce_max',
        'reduce_min',
        'reduce_prod',
        'reduce_sum',
        'round',
        'rsqrt',
        'scale',
        'sign',
        'sin',
        'sinh',
        'sqrt',
        'square',
        'stanh',
        'sum',
        'sums',
        'tanh',
        'elementwise_sum',
        'max',
        'min',
        'mm',
        'div',
        'multiply',
        'add',
        'atan',
        'logsumexp',
        'inverse',
        'log1p',
        'erf',
        'addcmul',
        'addmm',
        'clamp',
        'trace',
        'kron'
]


# yapf: enable.


def generate_op_noattr(op_type):
    """Register the Python layer for an Operator without Attribute..

    Args:
       op_type: The name of the operator to be created.

    This function takes in the operator type (sin, tanh etc) and
    creates the operator functionality.

    """
    op_proto = OpProtoHolder.instance().get_op_proto(op_type)

    def func(x, name=None):
        if in_dygraph_mode():
            op = getattr(core.ops, op_type)
            return op(x)

        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 op_type)
        helper = LayerHelper(op_type, **locals())

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": out})
        return out

    func.__name__ = op_type
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`.\n    "
            "out(Variable, optional): The default value is None. Optional output can be any created Variable that meets the requirements to store the result of operation. if out is None, a new Varibale will be create to store the result."
        ])
    func.__doc__ = func.__doc__ + """

Return type
  Variable
Examples:
    .. code-block:: python

        import numpy as np

        import paddle
        import paddle.fluid as fluid

        inputs = fluid.data(name="x", shape = [None, 4], dtype='float32')
        output = paddle.%s(inputs)

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        # input.shape = 1x4, batch_size = 1
        img = np.array([[1.0, 2.0, 3.0, 4.0]]).astype(np.float32)
        res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
        print(res)
""" % op_type
    return func

@templatedoc()
def pow(input, exponent, name=None):
    """
	:alias_main: paddle.pow
	:alias: paddle.pow,paddle.tensor.pow,paddle.tensor.math.pow

    This is the Pow activation operator.

    :math:`out = input^{exponent}`

    Args:
        input(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
        exponent(float32|Variable): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            x = fluid.data(name="x", shape=[32,32], dtype="float32")

            # example 1: argument exponent is float
            y_1 = paddle.pow(x, 2.0)
            # y_1 is x^{2.0}

            # example 2: argument exponent is Variable
            exponent_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
            y_2 = paddle.pow(x, exponent_tensor)
            # y_2 is x^{3.0}
    """
    if in_dygraph_mode():
        return core.ops.pow(input, "exponent", exponent)

    helper = LayerHelper('pow', **locals())
    inputs = {'X': input}
    attrs = {}
    if isinstance(exponent, Variable):
        exponent.stop_gradient = True
        inputs['FactorTensor'] = exponent
    else:
        attrs['factor'] = exponent

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    check_dtype(
        out.dtype, out.name,
        convert_dtype(input.dtype), 'pow',
        '(The out data type in pow must be the same with input data type.)')

    helper.append_op(
        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


__ops__noattr__ = [
    'atan',
    'sin',
    'sqrt',
    'tanh',
]

for _OP in set(__ops__noattr__):
    globals()[_OP] = generate_op_noattr(_OP)
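
# A minimal usage sketch for the generated no-attribute ops above, kept as a
# comment so importing this module stays side-effect free (assumes a dygraph
# context):
#
#     import numpy as np
#     import paddle
#     import paddle.fluid as fluid
#
#     with fluid.dygraph.guard():
#         x = fluid.dygraph.to_variable(np.array([0.0, 1.0]).astype('float32'))
#         y = paddle.sin(x)  # module-level `sin` produced by generate_op_noattr
#         print(y.numpy())   # approximately [0., 0.841471]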


@dygraph_only
def _elementwise_op_in_dygraph(x,
                               y,
                               axis=-1,
                               act=None,
                               use_mkldnn=False,
                               op_name=None):
    op = getattr(core.ops, op_name)
    out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn)


def _elementwise_op(helper):
    op_type = helper.layer_type
    original_op_type = helper.kwargs.get('original_op_type', op_type)
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)

    assert x is not None, 'x cannot be None in {}'.format(original_op_type)
    assert y is not None, 'y cannot be None in {}'.format(original_op_type)
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
        original_op_type)
    check_variable_and_dtype(
        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'],
        original_op_type)

    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)

    helper.append_op(
        type=op_type,
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis,
               'use_mkldnn': use_mkldnn})
    return helper.append_activation(out)
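
# The public elementwise wrappers below (add, div, multiply) all follow the
# same two-path pattern: call the eager kernel through
# _elementwise_op_in_dygraph in dygraph mode, otherwise build a graph op via
# _elementwise_op. A hypothetical wrapper would look like this sketch:
#
#     def _my_elementwise_add(x, y, axis=-1, name=None):
#         if in_dygraph_mode():
#             return _elementwise_op_in_dygraph(
#                 x, y, axis=axis, op_name='elementwise_add')
#         return _elementwise_op(LayerHelper('elementwise_add', **locals()))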


def add(x, y, alpha=1, name=None):
    """
Examples:

    .. code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z1 = paddle.add(x, y)
        z2 = paddle.add(x, y, alpha=10)
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z1.name, z2.name])

        print(z_value[0]) # [3., 8., 6.]
        print(z_value[1]) # [12. 53. 24.]


    .. code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((4, 5)).astype('float32')
            }

        x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.data(name="y", shape=[4, 5], dtype='float32')
        z = paddle.add(x, y, name='z')
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value[0])
        print(z_value[0].shape) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = paddle.add(x, y)
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value[0])
        print(z_value[0].shape) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        x = fluid.data(name="x", shape=[3], dtype="float32")
        y = fluid.data(name='y', shape=[3], dtype='float32')
        z = paddle.add(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        data1 = np.array([2, 3, 4], dtype='float32')
        data2 = np.array([1, 5, 2], dtype='float32')
        z_value = exe.run(feed={'x': data1,
                                'y': data2},
                                fetch_list=[z])
        print(z_value[0]) # [3. 8. 6.]


    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = paddle.add(x, y, alpha=-0.5)
            np_z = z.numpy()
            print(np_z)  # [1.5, 0.5, 3. ]

    """
    op_type = 'elementwise_add'
    axis = -1
    act = None
    if alpha != 1:
        y = scale(y, scale=alpha)
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)

    return _elementwise_op(LayerHelper(op_type, **locals()))


def div(x, y, name=None):
    """
Examples:

    .. code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = paddle.div(x, y)
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value[0]) # [2., 0.6, 2.]


    .. code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((4, 5)).astype('float32')
            }

        x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.data(name="y", shape=[4, 5], dtype='float32')
        z = paddle.div(x, y, name='z')
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value[0])
        print(z_value[0].shape) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = paddle.div(x, y)
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value[0])
        print(z_value[0].shape) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard(fluid.CPUPlace()):
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = paddle.div(x, y)
            np_z = z.numpy()
            print(np_z)  # [2., 0.6, 2.]

    """
    op_type = 'elementwise_div'
    axis = -1
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)

    return _elementwise_op(LayerHelper(op_type, **locals()))


def multiply(x, y, axis=-1, name=None):
    """
	:alias_main: paddle.multiply
	:alias: paddle.multiply,paddle.tensor.multiply,paddle.tensor.math.multiply

Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        paddle.enable_imperative()
        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
        x = paddle.imperative.to_variable(x_data)
        y = paddle.imperative.to_variable(y_data)
        res = paddle.multiply(x, y)
        print(res.numpy()) # [[5, 12], [21, 32]]

        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
        y_data = np.array([1, 2], dtype=np.float32)
        x = paddle.imperative.to_variable(x_data)
        y = paddle.imperative.to_variable(y_data)
        res = paddle.multiply(x, y, axis=1)
        print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]

    """
    op_type = 'elementwise_mul'
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)

    return _elementwise_op(LayerHelper(op_type, **locals()))


for func in [
        add,
        div,
        multiply,
]:
    proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'multiply': 'elementwise_mul'}
    op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
    alias_main = ':alias_main: paddle.%(func)s' % {'func': func.__name__}
    alias = ':alias: paddle.%(func)s, paddle.tensor.%(func)s, paddle.tensor.math.%(func)s' % {'func': func.__name__}

    if func.__name__ in ['add']:

        additional_args_lines = [
            "alpha (int|float, optional): The alpha factor applied to y. Default is 1. If alpha is not 1, the equation becomes Out = X + alpha * Y.",
            "name (string, optional): Name of the output. \
            Default is None. It's used to print debug info for developers. Details: \
            :ref:`api_guide_Name` "
        ]
    else:
        additional_args_lines = [
            "name (string, optional): Name of the output. \
            Default is None. It's used to print debug info for developers. Details: \
            :ref:`api_guide_Name` "
        ]

    func.__doc__ = alias_main + """\n""" + alias + """\n""" + _generate_doc_string_(
        op_proto,
        additional_args_lines=additional_args_lines,
        skip_attrs_set={"x_data_format", "y_data_format", "axis",
            "use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
        }) + """\n""" + str(func.__doc__)

def sum(input, dim=None, dtype=None, keep_dim=False, name=None):
    """
	:alias_main: paddle.sum
	:alias: paddle.sum,paddle.tensor.sum,paddle.tensor.math.sum

    Computes the sum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        dtype(str, optional): The dtype of output tensor. The default value is None, the dtype
            of output is the same as input tensor.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of the summation operation on the specified dim of the input tensor;
        its data type is the same as the input Tensor's.

    Raises:
        ValueError: The :attr:`dtype` must be float64 or int64.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            out1 = paddle.sum(x)  # [3.5]
            out2 = paddle.sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            out3 = paddle.sum(x, dim=-1)  # [1.9, 1.6]
            out4 = paddle.sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            out5 = paddle.sum(y, dim=[1, 2]) # [10, 26]
            out6 = paddle.sum(y, dim=[0, 1]) # [16, 20]

    """
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    attrs = {
        'dim': dim if dim is not None and dim != [] else [0],
        'keep_dim': keep_dim,
        'reduce_all': True if dim is None or dim == [] else False,
    }
    dtype_flag = False
    if dtype is not None:
        if dtype in ['float64', 'int64']:
            if (convert_dtype(input.dtype) == "float32" and dtype == "float64") or \
               (convert_dtype(input.dtype) == "int32" and dtype == "int64"):
                attrs.update({
                    'in_dtype': input.dtype,
                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
                })
                dtype_flag = True
        else:
            raise ValueError(
                "The value of 'dtype' in sum op must be float64 or int64, but received {}.".
                format(dtype))

    if in_dygraph_mode():
        reduce_all = True if dim is None or dim == [] else False
        dim = dim if dim is not None and dim != [] else [0]
        if dtype_flag:
            return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                       'reduce_all', reduce_all, 'in_dtype',
                                       input.dtype, 'out_dtype',
                                       convert_np_dtype_to_dtype_(dtype))
        else:
            return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                       'reduce_all', reduce_all)
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
    helper = LayerHelper('sum', **locals())
    if dtype_flag:
        out = helper.create_variable_for_type_inference(
            dtype=convert_np_dtype_to_dtype_(dtype))
    else:
        out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out
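
# A minimal sketch of the dtype promotion handled in `sum` above (assumes a
# dygraph context): only float32 -> 'float64' and int32 -> 'int64' attach the
# in_dtype/out_dtype attributes; any other `dtype` value raises ValueError.
#
#     import numpy as np
#     import paddle
#     import paddle.fluid as fluid
#
#     with fluid.dygraph.guard():
#         x = fluid.dygraph.to_variable(np.array([[1, 2], [3, 4]], dtype='int32'))
#         s = paddle.sum(x, dtype='int64')  # accumulate in int64 to avoid overflow
#         print(s.numpy())                  # [10]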


@templatedoc(op_type="sum")
def elementwise_sum(inputs, name=None):
    """
	:alias_main: paddle.elementwise_sum
	:alias: paddle.elementwise_sum,paddle.tensor.elementwise_sum,paddle.tensor.math.elementwise_sum

    ${comment}

    Case 1:
    ::
        Input:
            Input. Shape = [2, 3]
            Input = [[1, 2, 3],
                     [4, 5, 6]]

        Output:
            The output. Shape = [2, 3]
            Output = [[1, 2, 3],
                      [4, 5, 6]]

    Case 2:
    ::
        Input:
            First input:
            Input1. Shape = [2, 3]
            Input1 = [[1, 2, 3],
                      [4, 5, 6]]

        The second input:
            Input2. Shape = [2, 3]
            Input2 = [[7, 8, 9],
                      [10, 11, 12]]

        Output:
            The output. Shape = [2, 3]
            Output = [[8, 10, 12],
                      [14, 16, 18]]

    Args:
        inputs (Variable|list(Variable)): A Variable list. The shape and data type of the list elements should be consistent.
            Variable can be a multi-dimensional Tensor or LoDTensor, and data types can be: float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: the sum of input :math:`inputs`. Its shape and data type are consistent with :math:`inputs`.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
            input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
            sum = paddle.elementwise_sum([input0, input1])

            # You can print out 'sum' via executor.
            out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_main_program())

            # The printed result is:
            # 1570701754	the sum of input0 and input1: 	The place is:CPUPlace
            # Tensor[elementwise_sum_0.tmp_0]
            #    shape: [2,3,]
            #    dtype: l
            #    data: 8,8,8,8,8,8,

            # the sum of input0 and input1 is 2-D Tensor with shape [2,3].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.
    """

    helper = LayerHelper('elementwise_sum', **locals())
    check_type(inputs, 'inputs', (Variable, tuple, list), 'elementwise_sum')
    if isinstance(inputs, list) or isinstance(inputs, tuple):
        if len(inputs) > 0:
            for input in inputs:
                check_variable_and_dtype(input, "inputs", \
                   ['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')
    else:
        check_variable_and_dtype(inputs, "inputs", \
                ['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')


    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('inputs'))
    helper.append_op(
        type='sum',
        inputs={'X': inputs},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})

    return out


def mm(input, mat2, name=None):
    """
	:alias_main: paddle.mm
	:alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm

    Applies matrix multiplication to two tensors.

    Currently, the input tensors can be of any rank, but when the rank of
    either input is larger than 3, the two inputs must have the same rank.


    Also note that if the raw tensor :math:`input` or :math:`mat2` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        mat2 (Variable): The input variable which is a Tensor or LoDTensor.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            # Examples to clarify shapes of the inputs and output
            # x: [B, ..., M, K], mat2: [B, ..., K, N]
            # paddle.mm(x, mat2)  # out: [B, ..., M, N]

            # x: [B, M, K], mat2: [B, K, N]
            # paddle.mm(x, mat2)  # out: [B, M, N]

            # x: [B, M, K], mat2: [K, N]
            # paddle.mm(x, mat2)  # out: [B, M, N]

            # x: [M, K], mat2: [K, N]
            # paddle.mm(x, mat2)  # out: [M, N]

            # x: [B, M, K], mat2: [K]
            # paddle.mm(x, mat2)  # out: [B, M]

            # x: [K], mat2: [K]
            # paddle.mm(x, mat2)  # out: [1]

            import paddle
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2, 3], dtype='float32')
            mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32')
            out = paddle.mm(x, mat2) # out shape is [2, 2]
    """
    if in_dygraph_mode():
        out = _varbase_creator(dtype=input.dtype)
        core.ops.matmul(input, mat2, out)
        return out

    def __check_input(x, y):
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(val, name,
                                     ['float16', 'float32', 'float64'], 'mm')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        # check the inner 2 dimensions
        if x_shape[-1] != y_shape[-2]:
            if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
                raise ValueError(
                    "After performing an optional transpose, Input X's width should be "
                    "equal to Y's width for multiplication "
                    "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                    % (x_shape, y_shape))

        if len(y_shape) > 2 and len(x_shape) > 2:
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    __check_input(input, mat2)

    helper = LayerHelper('mm', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='matmul', inputs={'X': input,
                               'Y': mat2}, outputs={'Out': out})
    return out


def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
    """
	:alias_main: paddle.addmm
	:alias: paddle.addmm,paddle.tensor.addmm,paddle.tensor.math.addmm

    **addmm**

    This operator is used to perform matrix multiplication for input $x$ and $y$.
    $input$ is added to the final result.
    The equation is:

    ..  math::
        Out = alpha * x * y + beta * input

    $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.

    Args:
        input (Variable): The input Tensor/LoDTensor to be added to the final result.
        x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
        y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
        beta (float): Coefficient of $input$.
        alpha (float): Coefficient of $x*y$.
940 941 942 943 944 945 946 947 948 949 950 951 952 953 954
        name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of addmm op.

    Examples:
        ..  code-block:: python

            import numpy as np
            import paddle

            data_x = np.ones((2, 2)).astype(np.float32)
            data_y = np.ones((2, 2)).astype(np.float32)
            data_input = np.ones((2, 2)).astype(np.float32)

            paddle.enable_imperative()

            x = paddle.imperative.to_variable(data_x)
            y = paddle.imperative.to_variable(data_y)
            input = paddle.imperative.to_variable(data_input)

            out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )

            print( out.numpy() )
            # [[10.5 10.5]
            # [10.5 10.5]]
    """
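    # Shape validation below: input, x and y must all be 2-D; x @ y has shape
    # [x_shape[0], y_shape[1]], and `input` must either match that shape
    # exactly or be broadcastable along a dimension of size 1.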
    input_shape = input.shape
    x_shape = x.shape
    y_shape = y.shape
    if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
        raise ValueError("The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}".format(input_shape, x_shape, y_shape))
    if input_shape[0] != x_shape[0]:
        if input_shape[0] != 1:
            raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
        if input_shape[1] != y_shape[1] and input_shape[1] != 1:
            raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
    if input_shape[1] != y_shape[1]:
        if input_shape[1] != 1:
            raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
        if input_shape[0] != x_shape[0] and input_shape[0] != 1:
            raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
    if x_shape[1] != y_shape[0]:
        raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))



    if in_dygraph_mode():
        out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
        return out

    inputs = {'Input': input, "X": x, "Y": y}
    attrs = {'Alpha': alpha, 'Beta': beta}

    helper = LayerHelper("addmm", **locals())
    check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
    check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
    check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
    return out


def logsumexp(x, dim=None, keepdim=False, name=None):
    """
	:alias_main: paddle.logsumexp
	:alias: paddle.logsumexp,paddle.tensor.logsumexp,paddle.tensor.math.logsumexp

    This operator calculates the log of the sum of exponentials of the input Tensor.

    .. math::

       logsumexp(x) = \\log \\sum \\exp(x)


    Parameters:
       x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
       dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
         sum all elements of :attr:`x` and return a Tensor variable with a single element,
         otherwise must be in the range :math:`[-rank(x), rank(x))`. If :math:`dim[i] < 0`,
         the dimension to reduce is :math:`rank + dim[i]`.
       keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
         The result tensor will have one fewer dimension than the :attr:`x` unless :attr:`keepdim`
         is true. Default value is False.
       name (str, optional): The default value is None.  Normally there is no need for user to
         set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
       Variable: The calculated result Tensor/LoDTensor.

    Examples:

    .. code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard():
          np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
          x = fluid.dygraph.to_variable(np_x)
          print(paddle.logsumexp(x).numpy())

    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            x = fluid.dygraph.to_variable(np_x)
            print(paddle.logsumexp(x, dim=1).numpy())
            print(paddle.logsumexp(x, dim=[0, 2]).numpy())

    """
    op_type = 'logsumexp'
    assert x is not None, 'x cannot be None in {}'.format(op_type)

    # reduce_sum does not support float16
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)

    exp_out = layers.exp(x)
    sum_out = layers.reduce_sum(exp_out, dim, keepdim)

    return layers.log(sum_out, name)
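
# Note: the composition above exponentiates `x` directly, so large inputs can
# overflow in exp(). A numerically stable variant (a sketch, not the
# implementation used here) subtracts the maximum before exponentiating:
#
#     m = layers.reduce_max(x)  # max over all elements, shape [1]
#     out = layers.log(layers.reduce_sum(layers.exp(x - m))) + m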


def inverse(input, name=None):
    """
	:alias_main: paddle.inverse
	:alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse

    Takes the inverse of the square matrix. A square matrix is a matrix with
    the same number of rows and columns. The input can be a square matrix
    (2-D Tensor) or batches of square matrices.

    Args:
        input (Variable): The input Variable which holds a Tensor. The last two
            dimensions should be equal. When the number of dimensions is
            greater than 2, it is treated as batches of square matrix. The data
            type can be float32 and float64.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information,
            please refer to :ref:`api_guide_Name`

    Returns:
        Variable: A Tensor holds the inverse of input. The shape and data type
            is the same as input.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.fluid as fluid

            mat_np = np.array([[2, 0], [0, 2]]).astype("float32")

            # example for static graph
            input = fluid.data("input", shape=[2, 2], dtype="float32")
            out = paddle.inverse(input)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            results = exe.run(feed={"input": mat_np },
                              fetch_list=[out.name])
            print(results[0]) # [[0.5, 0], [0, 0.5]]

            # example for dynamic graph
            with fluid.dygraph.guard():
                mat = fluid.dygraph.to_variable(mat_np)
                inv = paddle.inverse(mat)
                print(inv) # [[0.5, 0], [0, 0.5]]
    """
    if in_dygraph_mode():
        return core.ops.inverse(input)

    def _check_input(input):
        check_variable_and_dtype(input, 'input',
                                 ['float32', 'float64'], 'inverse')
        if len(input.shape) < 2:
            raise ValueError(
                "The input of inverse is expected to be a Tensor whose number "
                "of dimensions is no less than 2. But received: %d, "
                "input's shape: %s." % (len(input.shape), input.shape))

    _check_input(input)

    helper = LayerHelper('inverse', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]})
    return out


def max(input, dim=None, keep_dim=False, name=None):
    """
	:alias_main: paddle.max
	:alias: paddle.max,paddle.tensor.max,paddle.tensor.math.max

    Computes the maximum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimension along which the maximum is computed.
            If :attr:`None`, compute the maximum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of the maximum on the specified dim of the input tensor;
        its data type is the same as the input Tensor's.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            paddle.max(x)  # [0.9]
            paddle.max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
            paddle.max(x, dim=-1)  # [0.9, 0.7]
            paddle.max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]
            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            paddle.max(y, dim=[1, 2]) # [4.0, 8.0]
            paddle.max(y, dim=[0, 1]) # [7.0, 8.0]
    """

    helper = LayerHelper('max', **locals())
    out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]

    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'max')

    reduce_all = True if dim is None or dim == [] else False
    dim = dim if dim is not None and dim != [] else [0]

    if in_dygraph_mode():
        return core.ops.reduce_max(input, 'dim', dim, 'keep_dim', keep_dim,
                                   'reduce_all', reduce_all)
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim,
            'keep_dim': keep_dim,
            'reduce_all': reduce_all
        })
    return out


def min(input, dim=None, keep_dim=False, name=None):
    """
	:alias_main: paddle.min
	:alias: paddle.min,paddle.tensor.min,paddle.tensor.math.min

    Computes the minimum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of the minimum on the specified dim of the input tensor;
        its data type is the same as the input Tensor's.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            paddle.min(x)  # [0.1]
            paddle.min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
            paddle.min(x, dim=-1)  # [0.2, 0.1]
            paddle.min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]
            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            paddle.min(y, dim=[1, 2]) # [1.0, 5.0]
            paddle.min(y, dim=[0, 1]) # [1.0, 2.0]
    """

    helper = LayerHelper('min', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]

    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'min')

    reduce_all = True if dim is None or dim == [] else False
    dim = dim if dim is not None and dim != [] else [0]

    if in_dygraph_mode():
        return core.ops.reduce_min(input, 'dim', dim, 'keep_dim', keep_dim,
                                   'reduce_all', reduce_all)
    helper.append_op(
        type='reduce_min',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim,
            'keep_dim': keep_dim,
            'reduce_all': reduce_all
        })
    return out


def log1p(x, name=None):
    """
	:alias_main: paddle.log1p
	:alias: paddle.log1p,paddle.tensor.log1p,paddle.tensor.math.log1p

    Calculates the natural log of one plus the given input tensor, element-wise.

    .. math::

        Out = \\ln(x+1)

    Args:
        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The natural log of (input + 1), computed element-wise over the input LoDTensor or Tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np
            # Graph Organizing
            x = fluid.data(name="x", shape=[2,1], dtype="float32")
            res = paddle.log1p(x)
            # Create an executor using CPU as an example
            exe = fluid.Executor(fluid.CPUPlace())
            # Execute
            x_i = np.array([[0], [1]]).astype(np.float32)
            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
            print(res_val) # [[0.], [0.6931472]]
    """

    if in_dygraph_mode():
        return core.ops.log1p(x)

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
    inputs = {'X': [x]}
    helper = LayerHelper('log1p', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
    return out


def addcmul(input, tensor1, tensor2, value=1.0, name=None):
    """
	:alias_main: paddle.addcmul
	:alias: paddle.addcmul,paddle.tensor.addcmul,paddle.tensor.math.addcmul

    Calculate the element-wise multiplication of tensor1 and tensor2,
    then multiply the result by value, and add it to input. The shapes of input,
    tensor1 and tensor2 should be broadcastable.

    The equation is:

    ..  math::

        out = input + value * tensor1 * tensor2

    Args:
        input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
        tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
        tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
        value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        out(Variable): The output result. A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.fluid as fluid
          input = fluid.data(name='input', dtype='float32', shape=[3, 4])
          tensor1 = fluid.data(name='tensor1', dtype='float32', shape=[1, 4])
          tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
          data = paddle.addcmul(input, tensor1, tensor2, value=1.0)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    check_variable_and_dtype(tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    check_variable_and_dtype(tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    if convert_dtype(input.dtype) in ['float32', 'float64']:
        check_type(value, 'value', float, 'addcmul')
    if convert_dtype(input.dtype) in ['int32', 'int64']:
        check_type(value, 'value', int, 'addcmul')

    out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
    return out


def clamp(input, min=None, max=None, name=None):
    """
	:alias_main: paddle.clamp
	:alias: paddle.clamp,paddle.tensor.clamp,paddle.tensor.math.clamp

    **clamp layer**

    This operator clamps all elements in input into the range [ min, max ] and returns
    a resulting tensor as the following equation:

    .. math::

        Out = MIN(MAX(x, min), max)

    Args:
        input (Variable): An input N-D Tensor or LoDTensor
            with data type float32, float64.
        min (float32|Variable): The lower bound with type ``float32`` or a ``Tensor``
            with shape [1] and type ``int32``, ``float32``, ``float64``.
        max (float32|Variable): The upper bound with type ``float32`` or a ``Tensor``
            with shape [1] and type ``int32``, ``float32``, ``float64``.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor or LoDTensor with the same data type and data shape as input's.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1.2,3.5],
                            [4.5,6.4]]).astype('float32')
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = paddle.tensor.clamp(x1, min=3.5, max=5.0)
                out2 = paddle.tensor.clamp(x1, min=2.5)
                print(out1.numpy())
                # [[3.5, 3.5]
                # [4.5, 5.0]]
                print(out2.numpy())
                # [[2.5, 3.5]
                # [4.5, 6.4]]
    """

    assert min is not None or max is not None, "either min or max should be defined."

    if in_dygraph_mode():
        # When a bound is not given, fall back to the widest finite float range.
        min = -sys.float_info.max if min is None else min
        max = sys.float_info.max if max is None else max
        return core.ops.clip(input, "min", min, "max", max)

    if min is not None:
        check_type(min, 'min', (float, Variable), 'clamp')
        if isinstance(min, Variable):
            check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
                        'clamp', '(When the type of min in clamp is Variable.)')
    if max is not None:
        check_type(max, 'max', (float, Variable), 'clamp')
        if isinstance(max, Variable):
            check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
                        'clamp', '(When the type of max in clamp is Variable.)')

    inputs = {'X': input}
    attrs = {'min': -sys.float_info.max, 'max': sys.float_info.max}

    if isinstance(min, Variable):
        min.stop_gradient = True
        inputs['Min'] = min
    elif min is not None:
        attrs['min'] = min

    if isinstance(max, Variable):
        max.stop_gradient = True
        inputs['Max'] = max
    elif max is not None:
        attrs['max'] = max

    helper = LayerHelper('clamp', **locals())
    output = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    helper.append_op(
        type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)

    return output
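
# A minimal static-graph usage sketch for clamp with a tensor bound (an
# illustrative, hypothetical helper added for documentation; never called at
# import time). A shape-[1] tensor feeds the clip op's Min input, while a
# plain float becomes the max attribute:
def _clamp_tensor_bound_example():
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    x = fluid.data(name='clamp_x', shape=[2, 2], dtype='float32')
    lo = fluid.layers.fill_constant(shape=[1], dtype='float32', value=3.5)
    out = paddle.clamp(x, min=lo, max=5.0)

    exe = fluid.Executor(fluid.CPUPlace())
    x_i = np.array([[1.2, 3.5], [4.5, 6.4]], dtype='float32')
    res, = exe.run(fluid.default_main_program(),
                   feed={'clamp_x': x_i}, fetch_list=[out])
    print(res)  # [[3.5, 3.5], [4.5, 5.0]]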


def trace(x, offset=0, axis1=0, axis2=1, name=None):
    """
	:alias_main: paddle.trace
	:alias: paddle.trace,paddle.tensor.trace,paddle.tensor.math.trace

    This OP computes the sum along diagonals of the input tensor x.

    If ``x`` is 2D, returns the sum of the diagonal.

    If ``x`` has larger dimensions, then returns a tensor of diagonal sums; the diagonals are
    taken from the 2D planes specified by axis1 and axis2. By default, the 2D planes are formed
    by the first and second axes of the input tensor x.

    The argument ``offset`` determines where diagonals are taken from input tensor x:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.

    Args:
        x(Variable): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
        offset(int, optional): Which diagonal in input tensor x will be taken. Default: 0 (main diagonal).
        axis1(int, optional): The first axis with respect to which to take the diagonal. Default: 0.
        axis2(int, optional): The second axis with respect to which to take the diagonal. Default: 1.
        name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: the output data type is the same as input data type.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            case1 = np.random.randn(2, 3).astype('float32')
            case2 = np.random.randn(3, 10, 10).astype('float32')
            case3 = np.random.randn(3, 10, 5, 10).astype('float32')

            paddle.enable_imperative()

            case1 = paddle.imperative.to_variable(case1)
            case2 = paddle.imperative.to_variable(case2)
            case3 = paddle.imperative.to_variable(case3)
            data1 = paddle.trace(case1) # data1.shape = [1]
            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]
    """
    inputs = {'Input': [x]}
    attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}

    def __check_input(x, offset, axis1, axis2):
        check_dtype(x.dtype, 'Input',
                    ['int32', 'int64', 'float16', 'float32', 'float64'],
                    'trace')

        input_shape = list(x.shape)
        assert len(input_shape) >= 2, \
            "The x must be at least 2-dimensional, " \
            "but received input x's dimension: %s.\n" % \
            len(input_shape)

        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

        assert axis1_ < len(input_shape),     \
            "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"  \
            % (-(len(input_shape)), len(input_shape) - 1, axis1)

        assert axis2_ < len(input_shape),   \
            "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"   \
            % (-(len(input_shape)), len(input_shape) - 1, axis2)
        assert axis1_ != axis2_, \
            "axis1 and axis2 cannot be the same axis. " \
            "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)

    if not in_dygraph_mode():
        __check_input(x, offset, axis1, axis2)
    helper = LayerHelper('trace', **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='trace',
        inputs={'Input': [x]},
        attrs={'offset': offset,
               'axis1': axis1,
               'axis2': axis2},
        outputs={'Out': [out]})
    return out
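
# A minimal dygraph usage sketch showing how `offset` selects a diagonal in
# trace (an illustrative, hypothetical helper added for documentation; never
# called at import time):
def _trace_offset_example():
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    m = np.arange(9).reshape(3, 3).astype('float32')
    # m = [[0, 1, 2],
    #      [3, 4, 5],
    #      [6, 7, 8]]
    with fluid.dygraph.guard():
        t = fluid.dygraph.to_variable(m)
        print(paddle.trace(t).numpy())             # [12.]  (0 + 4 + 8)
        print(paddle.trace(t, offset=1).numpy())   # [6.]   (1 + 5)
        print(paddle.trace(t, offset=-1).numpy())  # [10.]  (3 + 7)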

@templatedoc(op_type="kron")
def kron(x, y, name=None):
    """
	:alias_main: paddle.kron
	:alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron

${comment}

    Args:
        x (Variable): the first operand of kron op, data type: float16, float32,
            float64, int32 or int64.
        y (Variable): the second operand of kron op, data type: float16,
            float32, float64, int32 or int64. Its data type should be the same
            as x.
        name(str, optional): The default value is None.  Normally there is no
            need for user to set this property.  For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as x.

    Examples:
        .. code-block:: python

          import paddle
          from paddle import fluid
          import paddle.fluid.dygraph as dg
          import numpy as np

          a = np.arange(1, 5).reshape(2, 2).astype(np.float32)
          b = np.arange(1, 10).reshape(3, 3).astype(np.float32)

          place = fluid.CPUPlace()
          with dg.guard(place):
              a_var = dg.to_variable(a)
              b_var = dg.to_variable(b)
              c_var = paddle.kron(a_var, b_var)
              c_np = c_var.numpy()
          print(c_np)

          #[[ 1.  2.  3.  2.  4.  6.]
          # [ 4.  5.  6.  8. 10. 12.]
          # [ 7.  8.  9. 14. 16. 18.]
          # [ 3.  6.  9.  4.  8. 12.]
          # [12. 15. 18. 16. 20. 24.]
          # [21. 24. 27. 28. 32. 36.]]
    """
    if in_dygraph_mode():
        return core.ops.kron(x, y)

    helper = LayerHelper('kron', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
    return out
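
# A minimal dygraph usage sketch for kron's shape rule: the output shape is the
# elementwise product of the operand shapes (an illustrative, hypothetical
# helper added for documentation; never called at import time):
def _kron_shape_example():
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        a = fluid.dygraph.to_variable(np.ones([2, 3], dtype='float32'))
        b = fluid.dygraph.to_variable(np.ones([4, 5], dtype='float32'))
        c = paddle.kron(a, b)
        print(c.shape)  # [8, 15], i.e. [2 * 4, 3 * 5]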