math.py 64.6 KB
Newer Older
W
WuHaobo 已提交
1
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 15 16
"""
math functions
"""
17
from __future__ import print_function
18

19
from paddle.common_ops_import import *
20
from ..fluid import layers
L
Li Fuchen 已提交
21 22 23
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
24
from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
25
import sys
26 27 28

# TODO: define math functions
# yapf: disable
29 30 31 32 33
from ..fluid.layers import abs    #DEFINE_ALIAS
from ..fluid.layers import acos    #DEFINE_ALIAS
from ..fluid.layers import asin    #DEFINE_ALIAS
from ..fluid.layers import ceil    #DEFINE_ALIAS
from ..fluid.layers import cos    #DEFINE_ALIAS
34 35
from ..fluid.layers import sinh    #DEFINE_ALIAS
from ..fluid.layers import cosh    #DEFINE_ALIAS
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57
from ..fluid.layers import elementwise_add    #DEFINE_ALIAS
from ..fluid.layers import elementwise_div    #DEFINE_ALIAS
from ..fluid.layers import elementwise_floordiv    #DEFINE_ALIAS
from ..fluid.layers import elementwise_mod    #DEFINE_ALIAS
from ..fluid.layers import elementwise_mul    #DEFINE_ALIAS
from ..fluid.layers import elementwise_pow    #DEFINE_ALIAS
from ..fluid.layers import elementwise_sub    #DEFINE_ALIAS
from ..fluid.layers import exp    #DEFINE_ALIAS
from ..fluid.layers import floor    #DEFINE_ALIAS
from ..fluid.layers import log    #DEFINE_ALIAS
from ..fluid.layers import reciprocal    #DEFINE_ALIAS
from ..fluid.layers import reduce_max    #DEFINE_ALIAS
from ..fluid.layers import reduce_min    #DEFINE_ALIAS
from ..fluid.layers import reduce_prod    #DEFINE_ALIAS
from ..fluid.layers import reduce_sum    #DEFINE_ALIAS
from ..fluid.layers import round    #DEFINE_ALIAS
from ..fluid.layers import rsqrt    #DEFINE_ALIAS
from ..fluid.layers import scale    #DEFINE_ALIAS
from ..fluid.layers import square    #DEFINE_ALIAS
from ..fluid.layers import stanh    #DEFINE_ALIAS
from ..fluid.layers import atan    #DEFINE_ALIAS
from ..fluid.layers import erf    #DEFINE_ALIAS
58 59
from ..fluid.layers import sqrt    #DEFINE_ALIAS
from ..fluid.layers import sin    #DEFINE_ALIAS
60

61 62 63
from ..fluid.layers import increment    #DEFINE_ALIAS
from ..fluid.layers import multiplex    #DEFINE_ALIAS
from ..fluid.layers import sums    #DEFINE_ALIAS
G
guofei 已提交
64
from ..fluid import layers
65

66
__all__ = [
67 68 69 70 71 72
        'abs',
        'acos',
        'asin',
        'atan',
        'ceil',
        'cos',
73
        'cosh',
74 75 76 77 78 79 80 81 82
        'cumsum',
        'elementwise_add',
        'elementwise_div',
        'elementwise_floordiv',
        'elementwise_mod',
        'elementwise_pow',
        'elementwise_sub',
        'exp',
        'floor',
83
        'increment',
84 85
        'log',
        'mul',
86
        'multiplex',
G
guofei 已提交
87
        'prod',
88 89 90 91 92 93 94 95 96 97 98
        'pow',
        'reciprocal',
        'reduce_max',
        'reduce_min',
        'reduce_prod',
        'reduce_sum',
        'round',
        'rsqrt',
        'scale',
        'sign',
        'sin',
99
        'sinh',
100 101 102 103
        'sqrt',
        'square',
        'stanh',
        'sum',
104
        'sums',
105 106 107
        'tanh',
        'elementwise_sum',
        'max',
108
        'maximum',
109
        'min',
110
        'minimum',
111
        'mm',
112 113 114 115 116
        'divide',
        'floor_divide',
        'remainder',
        'mod',
        'floor_mod',
117
        'multiply',
118 119 120
        'add',
        'atan',
        'logsumexp',
121
        'inverse',
122 123 124 125
        'log1p',
        'erf',
        'addcmul',
        'addmm',
Y
Yang Zhang 已提交
126
        'clip',
L
Li Fuchen 已提交
127
        'trace',
128
        'kron'
129 130 131
]
# yapf: enable.

132
@templatedoc()
W
WuHaobo 已提交
133
def pow(input, exponent, name=None):
134
    """
135 136
	:alias_main: paddle.pow
	:alias: paddle.pow,paddle.tensor.pow,paddle.tensor.math.pow
S
swtkiwi 已提交
137

138 139 140 141 142 143 144
    This is Pow Activation Operator.

    :math:`out = input^{exponent}`

    Args:
        input(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
        exponent(float32|Variable): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.
145
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
146 147 148 149 150 151 152 153 154 155
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.

    Examples:

        .. code-block:: python

            import paddle
156
            import paddle.fluid as fluid
157

158
            x = fluid.data(name="x", shape=[32,32], dtype="float32")
159 160

            # example 1: argument exponent is float
W
WuHaobo 已提交
161
            y_1 = paddle.pow(x, 2.0)
162 163 164
            # y_1 is x^{2.0}

            # example 2: argument exponent is Variable
165
            exponent_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
W
WuHaobo 已提交
166
            y_2 = paddle.pow(x, exponent_tensor)
167 168
            # y_2 is x^{3.0}
    """
W
WuHaobo 已提交
169 170 171
    if in_dygraph_mode():
        return core.ops.pow(input, "exponent", exponent)

172 173 174 175 176 177 178 179 180
    helper = LayerHelper('pow', **locals())
    inputs = {'X': input}
    attrs = {}
    if isinstance(exponent, Variable):
        exponent.stop_gradient = True
        inputs['FactorTensor'] = exponent
    else:
        attrs['factor'] = exponent

W
WuHaobo 已提交
181 182 183 184 185
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    check_dtype(
        out.dtype, out.name,
        convert_dtype(input.dtype), 'pow',
        '(The out data type in pow must be the same with input data type.)')
186 187 188 189 190 191

    helper.append_op(
        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
@dygraph_only
def _elementwise_op_in_dygraph(x,
                               y,
                               axis=-1,
                               act=None,
                               use_mkldnn=False,
                               op_name=None):
    op = getattr(core.ops, op_name)
    out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn)


def _elementwise_op(helper):
    op_type = helper.layer_type
    original_op_type = helper.kwargs.get('original_op_type', op_type)
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)

    assert x is not None, 'x cannot be None in {}'.format(original_op_type)
    assert y is not None, 'y cannot be None in {}'.format(original_op_type)
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
        original_op_type)
    check_variable_and_dtype(
        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'],
        original_op_type)

    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)
W
WuHaobo 已提交
224 225 226 227 228
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
229 230 231 232 233 234 235 236 237 238 239

    helper.append_op(
        type=op_type,
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis,
               'use_mkldnn': use_mkldnn})
    return helper.append_activation(out)


Y
Yang Zhang 已提交
240
def add(x, y, name=None):
241 242 243 244 245 246 247 248
    """
Examples:

    ..  code-block:: python

        import paddle
        import numpy as np

Y
Yang Zhang 已提交
249 250 251 252 253
        paddle.disable_static()
        np_x = np.array([2, 3, 4]).astype('float64')
        np_y = np.array([1, 5, 2]).astype('float64')
        x = paddle.to_variable(np_x)
        y = paddle.to_variable(np_y)
W
WuHaobo 已提交
254
        z = paddle.add(x, y)
Y
Yang Zhang 已提交
255 256
        np_z = z.numpy()
        print(np_z)  # [3., 8., 6. ]
257 258 259 260 261 262

    """
    op_type = 'elementwise_add'
    axis = -1
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
Y
Yang Zhang 已提交
263
            x, y, axis=axis, op_name=op_type)
264 265 266 267

    return _elementwise_op(LayerHelper(op_type, **locals()))


268
def divide(x, y, name=None):
269
    """
270
    Divide two tensors element-wise. The equation is:
271

272 273
    .. math::
        out = x / y
274

275 276
    **Note**:
    ``paddle.divide`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
277

278 279 280 281
    Args:
        x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
282

283 284
    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with $x$.
285

286
    Examples:
287

288
        ..  code-block:: python
289

290 291
            import paddle
            import numpy as np
292

293
            paddle.disable_static()
294

295 296 297 298 299 300
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            print(z.numpy())  # [2., 0.6, 2.]
301

302 303 304 305 306 307 308
    """
    op_type = 'elementwise_div'
    axis = -1
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)
309

310
    return _elementwise_op(LayerHelper(op_type, **locals()))
311 312


313 314 315
def floor_divide(x, y, name=None):
    """
    Floor divide two tensors element-wise. The equation is:
316

317 318
    .. math::
        out = x // y
319

320 321
    **Note**:
    ``paddle.floor_divide`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
322

323 324 325 326
    Args:
        x (Tensor): the input tensor, it's data type should be int32, int64.
        y (Tensor): the input tensor, it's data type should be int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
327

328 329
    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with $x$.
330

331
    Examples:
332

333
        ..  code-block:: python
334

335 336
            import paddle
            import numpy as np
337

338
            paddle.disable_static()
339

340 341 342 343 344 345
            np_x = np.array([2, 3, 8, 7])
            np_y = np.array([1, 5, 3, 3])
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.floor_divide(x, y)
            print(z.numpy())  # [2, 0, 2, 2]
346

347 348 349 350 351 352
    """
    op_type = 'elementwise_floordiv'
    axis = -1
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, op_name=op_type)
353

354
    return _elementwise_op(LayerHelper(op_type, **locals()))
355 356


357
def remainder(x, y, name=None):
358
    """
359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
    Mod two tensors element-wise. The equation is:

    .. math::
        out = x \% y

    **Note**:
    ``paddle.remainder`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .

    Args:
        x (Tensor): the input tensor, it's data type should be int32, int64.
        y (Tensor): the input tensor, it's data type should be int32, int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with $x$.

    Examples:

        ..  code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            np_x = np.array([2, 3, 8, 7])
            np_y = np.array([1, 5, 3, 3])
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.remainder(x, y)
            print(z.numpy())  # [0, 3, 2, 1]

    """
    op_type = 'elementwise_mod'
393 394 395
    axis = -1
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
396
            x, y, axis=axis, op_name=op_type)
397 398 399 400

    return _elementwise_op(LayerHelper(op_type, **locals()))


401 402 403 404
mod = remainder  #DEFINE_ALIAS
floor_mod = remainder  #DEFINE_ALIAS


405 406 407 408 409 410 411 412 413 414 415 416
def multiply(x, y, axis=-1, name=None):
    """
	:alias_main: paddle.multiply
	:alias: paddle.multiply,paddle.tensor.multiply,paddle.tensor.math.multiply

Examples:

    .. code-block:: python

        import paddle
        import numpy as np

417
        paddle.disable_static()
418 419
        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
420 421
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
422 423 424 425 426
        res = paddle.multiply(x, y)
        print(res.numpy()) # [[5, 12], [21, 32]]

        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
        y_data = np.array([1, 2], dtype=np.float32)
427 428
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
429 430 431 432 433 434 435 436 437 438 439 440
        res = paddle.multiply(x, y, axis=1)
        print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]

    """
    op_type = 'elementwise_mul'
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)

    return _elementwise_op(LayerHelper(op_type, **locals()))

441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542
def maximum(x, y, axis=-1, name=None):
    """
Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        paddle.disable_static()
  
        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.maximum(x, y)
        print(res.numpy())
        #[[5. 6.]
        # [7. 8.]]

        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
        y_data = np.array([1, 2], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.maximum(x, y, axis=1)
        print(res.numpy())
        #[[[1. 2. 3.]
        #  [2. 2. 3.]]]

        x_data = np.array([2, 3, 5], dtype=np.float32)
        y_data = np.array([1, 4, np.nan], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.maximum(x, y)
        print(res.numpy())
        #[ 2.  4. nan]

        x_data = np.array([5, 3, np.inf], dtype=np.float32)
        y_data = np.array([1, 4, 5], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.maximum(x, y)
        print(res.numpy())
        #[ 5.  4. inf]
    """
    op_type = 'elementwise_max'
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)
    return _elementwise_op(LayerHelper(op_type, **locals()))

def minimum(x, y, axis=-1, name=None):
    """
Examples:

    .. code-block:: python

        import paddle
        import numpy as np
        paddle.disable_static()
  
        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.minimum(x, y)
        print(res.numpy())
        #[[1. 2.]
        # [3. 4.]]

        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
        y_data = np.array([1, 2], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.minimum(x, y, axis=1)
        print(res.numpy())
        #[[[1. 1. 1.]
        #  [2. 2. 2.]]]

        x_data = np.array([2, 3, 5], dtype=np.float32)
        y_data = np.array([1, 4, np.nan], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.minimum(x, y)
        print(res.numpy())
        #[ 1.  3. nan]

        x_data = np.array([5, 3, np.inf], dtype=np.float32)
        y_data = np.array([1, 4, 5], dtype=np.float32)
        x = paddle.to_variable(x_data)
        y = paddle.to_variable(y_data)
        res = paddle.minimum(x, y)
        print(res.numpy())
        #[1. 3. 5.]
    """
    op_type = 'elementwise_min'
    act = None
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)
    return _elementwise_op(LayerHelper(op_type, **locals()))
543

544 545
for func in [
        add,
546 547 548
        maximum,
        minimum,
        multiply
549
]:
550
    proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'maximum': 'elementwise_max', 'minimum': 'elementwise_min', 'multiply': 'elementwise_mul'}
551 552
    op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])

Y
Yang Zhang 已提交
553 554 555 556 557 558 559
    additional_args_lines = [
        "name (string, optional): Name of the output. \
        Default is None. It's used to print debug info for developers. Details: \
        :ref:`api_guide_Name` "
    ]

    func.__doc__ = _generate_doc_string_(
560 561
        op_proto,
        additional_args_lines=additional_args_lines,
562
        skip_attrs_set={"x_data_format", "y_data_format", "axis",
563
            "use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
564
        }) + """\n""" + str(func.__doc__)
565

Y
Yang Zhang 已提交
566

567
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
568 569 570 571
    """
    Computes the sum of tensor elements over the given dimension.

    Args:
572 573 574
        x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`x` and return a
575
            Tensor variable with a single element, otherwise must be in the
576 577 578 579 580 581 582
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
            of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than the :attr:`x` unless :attr:`keepdim` is true, default
583
            value is False.
584
        name (str, optional): The default value is None. Normally there is no need for
585 586 587
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
588 589
        Tensor: Results of summation operation on the specified axis of input Tensor `x`,
        it's data type is the same as `x`.
590 591

    Raises:
592 593
        ValueError: The :attr:`dtype` must be float64 or int64.
        TypeError: The type of :attr:`axis` must be int, list or tuple.
594

595 596 597
    Examples:
        .. code-block:: python

598
            import numpy as np
599
            import paddle
600 601
            paddle.disable_static()

602 603 604 605
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
606 607
            x_data = np.array([[0.2, 0.3, 0.5, 0.9],[0.1, 0.2, 0.6, 0.7]]).astype('float32')
            x = paddle.to_variable(x_data)
608
            out1 = paddle.sum(x)  # [3.5]
609 610 611
            out2 = paddle.sum(x, axis=0)  # [0.3, 0.5, 1.1, 1.6]
            out3 = paddle.sum(x, axis=-1)  # [1.9, 1.6]
            out4 = paddle.sum(x, axis=1, keepdim=True)  # [[1.9], [1.6]]
612 613 614 615 616

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
617 618 619 620
            y_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype('float32')
            y = paddle.to_variable(y_data)
            out5 = paddle.sum(y, axis=[1, 2]) # [10, 26]
            out6 = paddle.sum(y, axis=[0, 1]) # [16, 20]
621
    """
622 623 624 625 626 627 628 629 630 631 632
    if axis is not None and not isinstance(axis, (list, tuple)):
        axis = [axis]

    if not axis:
        reduce_all_flag = True
    else:
        if len(axis) == len(x.shape):
            reduce_all_flag = True
        else:
            reduce_all_flag = False

633
    attrs = {
634 635 636
        'dim': axis if axis != None and axis != [] and axis != () else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag
637 638 639 640
    }
    dtype_flag = False
    if dtype is not None:
        if dtype in ['float64', 'int64']:
641 642
            if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \
               (convert_dtype(x.dtype) == "int32" and dtype == "int64"):
643
                attrs.update({
644
                    'in_dtype': x.dtype,
645 646 647 648 649 650 651 652 653
                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
                })
                dtype_flag = True
        else:
            raise ValueError(
                "The value of 'dtype' in sum op must be float64, int64, but received of {}".
                format(dtype))

    if in_dygraph_mode():
654
        axis = axis if axis != None and axis != [] else [0]
655
        if dtype_flag:
656 657 658
            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                       'reduce_all', reduce_all_flag, 'in_dtype',
                                       x.dtype, 'out_dtype',
659 660
                                       convert_np_dtype_to_dtype_(dtype))
        else:
661 662
            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                       'reduce_all', reduce_all_flag)
663
    check_variable_and_dtype(
664 665 666
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sum')
    check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum')

667 668 669 670 671
    helper = LayerHelper('sum', **locals())
    if dtype_flag:
        out = helper.create_variable_for_type_inference(
            dtype=convert_np_dtype_to_dtype_(dtype))
    else:
672
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
673 674
    helper.append_op(
        type='reduce_sum',
675
        inputs={'X': x},
676 677 678
        outputs={'Out': out},
        attrs=attrs)
    return out
679

680

681 682 683
@templatedoc(op_type="sum")
def elementwise_sum(inputs, name=None):
    """
684 685
	:alias_main: paddle.elementwise_sum
	:alias: paddle.elementwise_sum,paddle.tensor.elementwise_sum,paddle.tensor.math.elementwise_sum
S
swtkiwi 已提交
686

687
    ${comment}
688

689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719
    Case 1:
    ::
        Input:
            Input. Shape = [2, 3]
            Input = [[1, 2, 3],
                     [4, 5, 6]]

        Output:
            The output. Shape = [2, 3]
            Output = [[1, 2, 3],
                      [4, 5, 6]]

    Case 2:
    ::
        Input:
            First input:
            Input1. Shape = [2, 3]
            Input1 = [[1, 2, 3],
                      [4, 5, 6]]

        The second input:
            Input2. Shape = [2, 3]
            Input2 = [[7, 8, 9],
                      [10, 11, 12]]

        Output:
            The output. Shape = [2, 3]
            Output = [[8, 10, 12],
                      [14, 16, 18]]

    Args:
720 721
        inputs (Variable|list(Variable)):  A Varaible list. The shape and data type of the list elementsshould be consistent.
            Variable can be multi-dimensional Tensoror LoDTensor, and data types can be: float32, float64, int32, int64.
722 723 724 725
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
726
        Variable: the sum of input :math:`inputs` . its shape and data types are consistent with :math:`inputs` .
727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
            input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
            sum = paddle.elementwise_sum([input0, input1])

            # You can print out 'sum' via executor.
            out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_main_program())

            # The printed result is:
            # 1570701754	the sum of input0 and input1: 	The place is:CPUPlace
            # Tensor[elementwise_sum_0.tmp_0]
            #    shape: [2,3,]
            #    dtype: l
            #    data: 8,8,8,8,8,8,

            # the sum of input0 and input1 is 2-D Tensor with shape [2,3].
            # dtype is the corresponding C++ data type, which may vary in different environments.
752 753
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
754 755 756 757
            #       and '__int64' on Windows. They both represent 64-bit integer variables.
    """

    helper = LayerHelper('elementwise_sum', **locals())
758 759 760 761 762 763 764 765 766 767 768
    check_type(inputs, 'inputs', (Variable, tuple, list), 'elementwise_sum')
    if isinstance(inputs, list) or isinstance(inputs, tuple):
        if len(inputs) > 0:
            for input in inputs:
                check_variable_and_dtype(input, "inputs", \
                   ['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')
    else:
        check_variable_and_dtype(inputs, "inputs", \
                ['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')


769 770 771 772 773 774 775 776 777 778 779
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('inputs'))
    helper.append_op(
        type='sum',
        inputs={'X': inputs},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})

    return out


W
WuHaobo 已提交
780
def mm(input, mat2, name=None):
781
    """
782 783
	:alias_main: paddle.mm
	:alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm
S
swtkiwi 已提交
784

785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832
    Applies matrix multiplication to two tensors.

    Currently, the input tensors' rank can be any, but when the rank of any
    inputs is bigger than 3, this two inputs' rank should be equal.


    Also note that if the raw tensor :math:`x` or :math:`mat2` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        mat2 (Variable): The input variable which is a Tensor or LoDTensor.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            # Examples to clarify shapes of the inputs and output
            # x: [B, ..., M, K], mat2: [B, ..., K, N]
            # fluid.layers.matmul(x, mat2)  # out: [B, ..., M, N]

            # x: [B, M, K], mat2: [B, K, N]
            # fluid.layers.matmul(x, mat2)  # out: [B, M, N]

            # x: [B, M, K], mat2: [K, N]
            # fluid.layers.matmul(x, mat2)  # out: [B, M, N]

            # x: [M, K], mat2: [K, N]
            # fluid.layers.matmul(x, mat2)  # out: [M, N]

            # x: [B, M, K], mat2: [K]
            # fluid.layers.matmul(x, mat2)  # out: [B, M]

            # x: [K], mat2: [K]
            # fluid.layers.matmul(x, mat2)  # out: [1]

            import paddle
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2, 3], dtype='float32')
            mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32')
            out = paddle.mm(x, mat2) # out shape is [2, 2]
    """
    if in_dygraph_mode():
W
WuHaobo 已提交
833
        out = _varbase_creator(dtype=input.dtype)
834 835
        core.ops.matmul(input, mat2, out)
        return out
836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872

    def __check_input(x, y):
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(val, name,
                                     ['float16', 'float32', 'float64'], 'mm')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        # check the inner 2 dimensions
        if x_shape[-1] != y_shape[-2]:
            if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
                raise ValueError(
                    "After performing an optional transpose, Input X's width should be "
                    "equal to Y's width for multiplication "
                    "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                    % (x_shape, y_shape))

        if len(y_shape) > 2 and len(x_shape) > 2:
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    __check_input(input, mat2)

    helper = LayerHelper('mm', **locals())
W
WuHaobo 已提交
873
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
874 875 876 877
    helper.append_op(
        type='matmul', inputs={'X': input,
                               'Y': mat2}, outputs={'Out': out})
    return out
878

879

Y
yaoxuefeng 已提交
880
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
881
    """
882 883
	:alias_main: paddle.addmm
	:alias: paddle.addmm,paddle.tensor.addmm,paddle.tensor.math.addmm
S
swtkiwi 已提交
884

885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900
    **addmm**

    This operator is used to perform matrix multiplication for input $x$ and $y$.
    $input$ is added to the final result.
    The equation is:

    ..  math::
        Out = alpha * x * y + beta * input

    $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.

    Args:
        input (Variable): The input Tensor/LoDTensor to be added to the final result.
        x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
        y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
        beta (float): Coefficient of $input$.
Y
yaoxuefeng 已提交
901
        alpha (float): Coefficient of $x*y$.
902 903 904 905 906 907 908 909 910 911 912 913 914 915 916
        name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of addmm op.

    Examples:
        ..  code-block:: python

            import numpy as np
            import paddle

            data_x = np.ones((2, 2)).astype(np.float32)
            data_y = np.ones((2, 2)).astype(np.float32)
            data_input = np.ones((2, 2)).astype(np.float32)

917
            paddle.disable_static()
Y
yaoxuefeng 已提交
918

919 920 921
            x = paddle.to_variable(data_x)
            y = paddle.to_variable(data_y)
            input = paddle.to_variable(data_input)
Y
yaoxuefeng 已提交
922 923 924 925

            out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )

            print( out.numpy() )
926 927 928
            # [[10.5 10.5]
            # [10.5 10.5]]
    """
Y
yaoxuefeng 已提交
929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948
    input_shape = input.shape
    x_shape = x.shape
    y_shape = y.shape
    if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
        raise ValueError("The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}".format(input_shape, x_shape, y_shape))
    if input_shape[0] != x_shape[0]:
        if input_shape[0] != 1:
            raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
        if input_shape[1] != y_shape[1] and input_shape[1] != 1:
            raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
    if input_shape[1] != y_shape[1]:
        if input_shape[1] != 1:
            raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
        if input_shape[0] != x_shape[0] and input_shape[0] != 1:
            raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
    if x_shape[1] != y_shape[0]:
        raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))



949 950 951 952
    if in_dygraph_mode():
        out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
        return out

953 954 955 956
    inputs = {'Input': input, "X": x, "Y": y}
    attrs = {'Alpha': alpha, 'Beta': beta}

    helper = LayerHelper("addmm", **locals())
Y
yaoxuefeng 已提交
957
    check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
958 959 960 961 962 963 964
    check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
    check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
    return out
965 966


W
WuHaobo 已提交
967
def logsumexp(x, dim=None, keepdim=False, name=None):
968
    """
969 970
	:alias_main: paddle.logsumexp
	:alias: paddle.logsumexp,paddle.tensor.logsumexp,paddle.tensor.math.logsumexp
S
swtkiwi 已提交
971

972
    This operator calculates the log of the sum of exponentials of the input Tensor.
973

974 975
    .. math::
       logsumexp(x) = \log\sum exp(x)
976 977


978 979 980 981 982 983 984 985 986 987 988
    Parameters:
       x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
       dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
         sum all elements of :attr:`input` and return a Tensor variable with a single element,
         otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
         the dimension to reduce is :math:`rank + dim[i]`.
       keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
         The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim`
         is true, default value is False.
       name (str, optional): The default value is None.  Normally there is no need for user to
         set this property.  For more information, please refer to :ref:`api_guide_Name`
989

990 991
    Returns:
       Variable: The calcuated result Tensor/LoDTensor.
992

993
    Examples:
994

995
    .. code-block:: python
996

997 998 999 1000 1001 1002 1003 1004 1005 1006
        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard():
          np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
          x = fluid.dygraph.to_variable(np_x)
          print(paddle.logsumexp(x).numpy())

    ..  code-block:: python
1007

1008 1009 1010 1011 1012 1013 1014 1015 1016
        import paddle
        import paddle.fluid as fluid
        import numpy as np

        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            x = fluid.dygraph.to_variable(np_x)
            print(paddle.logsumexp(x, dim=1).numpy())
            print(paddle.logsumexp(x, dim=[0, 2]).numpy())
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028

    """
    op_type = 'logsumexp'
    assert x is not None, 'x cannot be None in {}'.format(op_type)

    # reduce_sum does not support float16
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)

    exp_out = layers.exp(x)
    sum_out = layers.reduce_sum(exp_out, dim, keepdim)

    return layers.log(sum_out, name)
1029 1030


S
swtkiwi 已提交
1031

1032 1033
def inverse(x, name=None):
    """
1034 1035 1036 1037 1038
    Takes the inverse of the square matrix. A square matrix is a matrix with
    the same number of rows and columns. The input can be a square matrix
    (2-D Tensor) or batches of square matrices.

    Args:
1039
        x (Variable): The input tensor. The last two
1040 1041 1042 1043 1044 1045 1046 1047
            dimensions should be equal. When the number of dimensions is
            greater than 2, it is treated as batches of square matrix. The data
            type can be float32 and float64.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information,
            please refer to :ref:`api_guide_Name`

    Returns:
1048 1049
        Variable: A Tensor holds the inverse of x. The shape and data type
                        is the same as x.
1050 1051 1052 1053 1054 1055 1056 1057

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            mat_np = np.array([[2, 0], [0, 2]]).astype("float32")
1058 1059 1060 1061
            paddle.disable_static()
            mat = paddle.to_variable(mat_np)
            inv = paddle.inverse(mat)
            print(inv) # [[0.5, 0], [0, 0.5]]
1062 1063 1064

    """
    if in_dygraph_mode():
1065
        return core.ops.inverse(x)
1066

1067 1068
    def _check_input(x):
        check_variable_and_dtype(x, 'x',
1069
                                 ['float32', 'float64'], 'inverse')
1070
        if len(x.shape) < 2:
1071 1072 1073
            raise ValueError(
                "The input of inverse is expected to be a Tensor whose number "
                "of dimensions is no less than 2. But reviced: %d, "
1074 1075
                "x's shape: %s." % (len(x.shape), x.shape))
    _check_input(x)
1076
    helper = LayerHelper('inverse', **locals())
1077
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
1078
    helper.append_op(
1079
        type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
1080 1081 1082
    return out


1083
def max(x, axis=None, keepdim=False, name=None):
1084
    """
S
swtkiwi 已提交
1085

1086
    Computes the maximum of tensor elements over the given axis.
1087 1088

    Args:
1089
        x(Tensor): A tensor, the data type is float32,
1090
            float64, int32, int64.
1091
        axis(list|int, optional): The axis along which the maximum is computed.
1092
            If :attr:`None`, compute the maximum over all elements of
1093
             `x` and return a Tensor variable with a single element,
1094 1095 1096
            otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the
1097
            output Tensor. The result tensor will have one fewer dimension
1098
            than the `x` unless :attr:`keepdim` is true, default
1099
            value is False.
1100
        name(str, optional): The default value is None.  Normally there is no need for
1101 1102 1103
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
1104
        Tensor, results of maximum on the specified axis of input tensor,
1105
        it's data type is the same as `x`.
1106 1107 1108

    Examples:
        .. code-block:: python
1109 1110

            import numpy as np
1111
            import paddle
1112

1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144
            paddle.disable_static()

            # data_x is a variable with shape [2, 4]
            # the axis is a int element
            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
                               [0.1, 0.2, 0.6, 0.7]])
            x = paddle.to_variable(data_x)
            result1 = paddle.max(x)
            print(result1.numpy())
            #[0.9]
            result2 = paddle.max(x, axis=0)
            print(result2.numpy()) 
            #[0.2 0.3 0.6 0.9]
            result3 = paddle.max(x, axis=-1)
            print(result3.numpy())
            #[0.9 0.7]
            result4 = paddle.max(x, axis=1, keepdim=True)
            print(result4.numpy())
            #[[0.9]
            # [0.7]]

            # data_y is a variable with shape [2, 2, 2]
            # the axis is list 
            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
                               [[5.0, 6.0], [7.0, 8.0]]])
            y = paddle.to_variable(data_y)
            result5 = paddle.max(y, axis=[1, 2])
            print(result5.numpy())
            #[4. 8.]
            result6 = paddle.max(y, axis=[0, 1])
            print(result6.numpy())
            #[7. 8.]
1145 1146
    """

1147
    if axis is not None and not isinstance(axis, list):
1148 1149 1150 1151 1152 1153 1154 1155
        if isinstance(axis, tuple):
            axis = list(axis)
        elif isinstance(axis, int):
            axis= [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(type(axis)))

1156 1157 1158 1159 1160
    reduce_all = True if axis == None or axis == [] else False
    axis = axis if axis != None and axis != [] else [0]
    if in_dygraph_mode():
        return core.ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
                                   'reduce_all', reduce_all)
1161

1162
    helper = LayerHelper('max', **locals())
1163
    check_variable_and_dtype(
1164
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
1165

1166 1167
    out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
1168 1169
    helper.append_op(
        type='reduce_max',
1170
        inputs={'X': x},
1171 1172
        outputs={'Out': out},
        attrs={
1173 1174
            'dim': axis,
            'keep_dim': keepdim,
1175 1176 1177 1178
            'reduce_all': reduce_all
        })
    return out

1179
def min(x, axis=None, keepdim=False, name=None):
1180
    """
S
swtkiwi 已提交
1181

1182
    Computes the minimum of tensor elements over the given axis
1183

1184
    Args:
1185 1186
        x(Tensor): A tensor, the data type is float32, float64, int32, int64.
        axis(list|int, optional): The axis along which the minimum is computed.
1187
            If :attr:`None`, compute the minimum over all elements of
1188
            `x` and return a Tensor variable with a single element,
1189 1190 1191
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the
1192
            output Tensor. The result tensor will have one fewer dimension
1193
            than the `x` unless :attr:`keepdim` is true, default
1194
            value is False.
W
WuHaobo 已提交
1195
        name(str, optional): The default value is None.  Normally there is no need for 
1196
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
1197

1198
    Returns:
1199
        Tensor, results of minimum on the specified axis of input tensor,
1200
        it's data type is the same as input's Tensor.
1201

1202 1203 1204
    Examples:
        .. code-block:: python

1205 1206
            import numpy as np
            import paddle
1207

1208
            paddle.disable_static()
1209

1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
            # data_x is a variable with shape [2, 4]
            # the axis is a int element
            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
                            [0.1, 0.2, 0.6, 0.7]])
            x = paddle.to_variable(data_x)
            result1 = paddle.min(x)
            print(result1.numpy())
            #[0.1]
            result2 = paddle.min(x, axis=0)
            print(result2.numpy())
            #[0.1 0.2 0.5 0.7]
            result3 = paddle.min(x, axis=-1)
            print(result3.numpy()) 
            #[0.2 0.1]
            result4 = paddle.min(x, axis=1, keepdim=True)
            print(result4.numpy())
            #[[0.2]
            # [0.1]]

            # data_y is a variable with shape [2, 2, 2]
            # the axis is list 
            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
                               [[5.0, 6.0], [7.0, 8.0]]])
            y = paddle.to_variable(data_y)
            result5 = paddle.min(y, axis=[1, 2])
            print(result5.numpy()) 
            #[1. 5.]
            result6 = paddle.min(y, axis=[0, 1])
            print(result6.numpy())
            #[1. 2.]
    """
1241

1242
    if axis is not None and not isinstance(axis, list):
1243 1244 1245 1246 1247 1248 1249
        if isinstance(axis, tuple):
            axis = list(axis)
        elif isinstance(axis, int):
            axis= [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(type(axis)))
1250 1251
    reduce_all = True if axis == None or axis == [] else False
    axis = axis if axis != None and axis != [] else [0]
1252
    if in_dygraph_mode():
1253
        return core.ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
1254
                                   'reduce_all', reduce_all)
1255 1256 1257 1258 1259 1260 1261

    helper = LayerHelper('min', **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')

    out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
1262 1263
    helper.append_op(
        type='reduce_min',
1264
        inputs={'X': x},
1265 1266
        outputs={'Out': out},
        attrs={
1267 1268
            'dim': axis,
            'keep_dim': keepdim,
1269 1270 1271 1272 1273
            'reduce_all': reduce_all
        })
    return out


W
WuHaobo 已提交
1274
def log1p(x, name=None):
1275
    """
1276 1277
	:alias_main: paddle.log1p
	:alias: paddle.log1p,paddle.tensor.log1p,paddle.tensor.math.log1p
S
swtkiwi 已提交
1278

1279 1280 1281 1282 1283 1284 1285 1286 1287
    Calculates the natural log of the given input tensor, element-wise.
    .. math::
        Out = \\ln(x+1)
    Args:
        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
        name(str, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
    Returns:
        Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
1288

1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311
    Examples:
        .. code-block:: python
            import paddle
            import paddle.fluid as fluid
            import numpy as np
            # Graph Organizing
            x = fluid.data(name="x", shape=[2,1], dtype="float32")
            res = paddle.log1p(x)
            # Create an executor using CPU as an example
            exe = fluid.Executor(fluid.CPUPlace())
            # Execute
            x_i = np.array([[0], [1]]).astype(np.float32)
            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
            print(res_val) # [[0.], [0.6931472]]
    """

    if in_dygraph_mode():
        return core.ops.log1p(x)

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
    inputs = {'X': [x]}
    helper = LayerHelper('log1p', **locals())
    dtype = helper.input_dtype(input_param_name='x')
W
WuHaobo 已提交
1312
    out = helper.create_variable_for_type_inference(dtype)
1313 1314
    helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
    return out
B
Bai Yifan 已提交
1315

W
WuHaobo 已提交
1316

W
WuHaobo 已提交
1317
def addcmul(input, tensor1, tensor2, value=1.0, name=None):
B
Bai Yifan 已提交
1318
    """
1319 1320
	:alias_main: paddle.addcmul
	:alias: paddle.addcmul,paddle.tensor.addcmul,paddle.tensor.math.addcmul
S
swtkiwi 已提交
1321

B
Bai Yifan 已提交
1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354
    Calculate the element-wise multiplication of tensor1 and tensor2,
    then multiply the result by value, and add it to input. The shape of input,
    tensor1, tensor2 should be broadcastable.
    The equation is:
    ..  math::
        out = input + value * tensor1 * tensor2
    Args:
        input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
        tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
        tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
        value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
    Returns:
        out(Variable): The output result. A Tensor with the same data type as input's.
    Examples:
        .. code-block:: python
          import paddle
          import paddle.fluid as fluid
          input = fluid.data(name='input', dtype='float32', shape=[3, 4])
          tensor1 = fluid.data(name='tenosr1', dtype='float32', shape=[1, 4])
          tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
          data = paddle.addcmul(input, tensor1, tensor2, value=1.0)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    check_variable_and_dtype(tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    check_variable_and_dtype(tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
    if convert_dtype(input.dtype) in ['float32', 'float64']:
        check_type(value, 'value', float, 'addcmul')
    if convert_dtype(input.dtype) in ['int32', 'int64']:
        check_type(value, 'value', int, 'addcmul')

W
WuHaobo 已提交
1355
    out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
B
Bai Yifan 已提交
1356
    return out
1357 1358


Y
Yang Zhang 已提交
1359
def clip(x, min=None, max=None, name=None):
1360
    """
Y
Yang Zhang 已提交
1361 1362
        :alias_main: paddle.clip
        :alias: paddle.clip,paddle.tensor.clip,paddle.tensor.math.clip
S
swtkiwi 已提交
1363

Y
Yang Zhang 已提交
1364
    **clip layer**
1365

Y
Yang Zhang 已提交
1366
    This operator clip all elements in input into the range [ min, max ] and return
1367 1368 1369 1370
    a resulting tensor as the following equation:

    .. math::

1371
        Out = MIN(MAX(x, min), max)
1372 1373

    Args:
Y
Yang Zhang 已提交
1374 1375
        x (Tensor): An N-D Tensor with data type float32 or float64.
        min (float32|Tensor): The lower bound with type ``float32`` or a ``Tensor``
1376
            with shape [1] and type ``int32``, ``float32``, ``float64``.
Y
Yang Zhang 已提交
1377
        max (float32|Tensor): The upper bound with type ``float32`` or a ``Tensor``
1378 1379 1380 1381 1382 1383
            with shape [1] and type ``int32``, ``float32``, ``float64``.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
Y
Yang Zhang 已提交
1384
        Tensor: A Tensor with the same data type and data shape as input.
1385 1386 1387 1388 1389 1390 1391

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x = np.array([[1.2,3.5], [4.5,6.4]]).astype('float32')
            x1 = paddle.to_variable(x)
            out1 = paddle.clip(x1, min=3.5, max=5.0)
            out2 = paddle.clip(x1, min=2.5)
            print(out1.numpy())
            # [[3.5, 3.5]
            #  [4.5, 5.0]]
            print(out2.numpy())
            # [[2.5, 3.5]
            #  [4.5, 6.4]]
    """

    assert min is not None or max is not None, "either min or max should be defined."

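    # In imperative mode, substitute extreme float sentinels for unspecified
    # bounds and call the op directly.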
    if in_dygraph_mode():
        min = sys.float_info.min if min is None else min
        max = sys.float_info.max if max is None else max
        return core.ops.clip(x, "min", min, "max", max)

    if min is not None:
        check_type(min, 'min', (float, int, Variable), 'clip')
        if isinstance(min, Variable):
            check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
                        'clip', '(When the type of min in clip is Variable.)')
    if max is not None:
        check_type(max, 'max', (float, int, Variable), 'clip')
        if isinstance(max, Variable):
            check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
                        'clip', '(When the type of max in clip is Variable.)')

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')

    inputs = {'X': x}
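    # Start from float sentinels; a Python-number bound overwrites its sentinel
    # below, while a tensor bound is wired in as an op input instead.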
    attrs = {'min': sys.float_info.min, 'max': sys.float_info.max}

    if isinstance(min, Variable):
        min.stop_gradient = True
        inputs['Min'] = min
    elif min is not None:
        attrs['min'] = min

    if isinstance(max, Variable):
        max.stop_gradient = True
        inputs['Max'] = max
    elif max is not None:
        attrs['max'] = max

    helper = LayerHelper('clip', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    helper.append_op(
        type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)

    return output


def trace(x, offset=0, axis1=0, axis2=1, name=None):
    """
        :alias_main: paddle.trace
        :alias: paddle.trace,paddle.tensor.trace,paddle.tensor.math.trace

    This OP computes the sum along diagonals of the input tensor x.

    If ``x`` is 2-D, returns the sum of the diagonal.

    If ``x`` has more than two dimensions, returns a tensor of sums of diagonals,
    where the diagonals are taken from the 2D planes specified by axis1 and axis2.
    By default, the 2D planes are formed by the first and second axes of the input tensor x.

    The argument ``offset`` determines where diagonals are taken from input tensor x:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.

    Args:
        x(Variable): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
        offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
        axis1(int, optional): The first axis along which to take the diagonal. Default: 0.
        axis2(int, optional): The second axis along which to take the diagonal. Default: 1.
        name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: the output data type is the same as input data type.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            case1 = np.random.randn(2, 3).astype('float32')
            case2 = np.random.randn(3, 10, 10).astype('float32')
            case3 = np.random.randn(3, 10, 5, 10).astype('float32')

            paddle.disable_static()

            case1 = paddle.to_variable(case1)
            case2 = paddle.to_variable(case2)
            case3 = paddle.to_variable(case3)
            data1 = paddle.trace(case1) # data1.shape = [1]
            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]
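
            # Cross-check (illustrative, not part of the API): for the 2-D case
            # the result matches numpy, e.g.
            # np.allclose(np.trace(case1.numpy()), data1.numpy())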
    """
    inputs = {'Input': [x]}
    attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}

    def __check_input(input, offset, dim1, dim2):
        check_dtype(x.dtype, 'Input',
                    ['int32', 'int64', 'float16', 'float32', 'float64'],
                    'trace')

        input_shape = list(x.shape)
        assert len(input_shape) >= 2,                     \
                "The x must be at least 2-dimensional, "   \
                "but received Input x's dimension: %s.\n" %  \
                len(input_shape)

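        # Normalize negative axes to their non-negative equivalents before the
        # range checks below.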
        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

        assert axis1_ < len(input_shape),     \
            "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"  \
            % (-(len(input_shape)), len(input_shape) - 1, axis1)

        assert axis2_ < len(input_shape),   \
            "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"   \
            % (-(len(input_shape)), len(input_shape) - 1, axis2)


        assert axis1_ != axis2_,   \
               "axis1 and axis2 cannot be the same axis. " \
               "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)

    if not in_dygraph_mode():
        __check_input(x, offset, axis1, axis2)
    helper = LayerHelper('trace', **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='trace',
        inputs={'Input': [x]},
        attrs={'offset': offset,
               'axis1': axis1,
               'axis2': axis2},
        outputs={'Out': [out]})
    return out

@templatedoc(op_type="kron")
def kron(x, y, name=None):
    """
        :alias_main: paddle.kron
        :alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron

${comment}

    Args:
        x (Variable): the first operand of kron op, data type: float16, float32,
            float64, int32 or int64.
        y (Variable): the second operand of kron op, data type: float16,
            float32, float64, int32 or int64. Its data type should be the same
            as x.
        name(str, optional): The default value is None.  Normally there is no
            need for user to set this property.  For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data is the same with x.

    Examples:
        .. code-block:: python

          import paddle
          from paddle import fluid
          import paddle.fluid.dygraph as dg
          import numpy as np

          a = np.arange(1, 5).reshape(2, 2).astype(np.float32)
          b = np.arange(1, 10).reshape(3, 3).astype(np.float32)

          place = fluid.CPUPlace()
          with dg.guard(place):
              a_var = dg.to_variable(a)
              b_var = dg.to_variable(b)
              c_var = paddle.kron(a_var, b_var)
              c_np = c_var.numpy()
          print(c_np)

          #[[ 1.  2.  3.  2.  4.  6.]
          # [ 4.  5.  6.  8. 10. 12.]
          # [ 7.  8.  9. 14. 16. 18.]
          # [ 3.  6.  9.  4.  8. 12.]
          # [12. 15. 18. 16. 20. 24.]
          # [21. 24. 27. 28. 32. 36.]]
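
          # Note: the result shape is the elementwise product of the operand
          # shapes; here (2*3, 2*3) = (6, 6).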
    """
    if in_dygraph_mode():
        return core.ops.kron(x, y)

    helper = LayerHelper('kron', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
    return out


def cumsum(x, axis=None, dtype=None, name=None):
    """
    The cumulative sum of the elements along a given axis. The first element of the result is the same as the first element of the input.

    Args:
        x (Tensor): Input of cumsum operator, the Tensor needed to be cumsumed. 
        axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
        dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None. 
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the result of cumsum operator, output of cumsum operator. 

    Examples:
        .. code-block:: python
            
            import paddle
            from paddle import to_variable
            import numpy as np

            paddle.disable_static()
            data_np = np.arange(12).reshape(3, 4)
            data = to_variable(data_np)

            y = paddle.cumsum(data)
            print(y.numpy())
            # [ 0  1  3  6 10 15 21 28 36 45 55 66]

            y = paddle.cumsum(data, axis=0)
            print(y.numpy())
            # [[ 0  1  2  3]
            #  [ 4  6  8 10]
            #  [12 15 18 21]]
            
            y = paddle.cumsum(data, axis=-1)
            print(y.numpy())
            # [[ 0  1  3  6]
            #  [ 4  9 15 22]
            #  [ 8 17 27 38]]

            y = paddle.cumsum(data, dtype='float64')
            print(y.dtype)
            # VarType.FP64
    """
    if axis is None:
        flatten = True
    else:
        flatten = False
    if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
        x = layers.cast(x, dtype)

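    # In imperative mode, call the op directly; `flatten` tells the kernel to
    # accumulate over the flattened input when no axis is given.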
    if in_dygraph_mode():
        if axis is None:
            return core.ops.cumsum(x, 'flatten', flatten)
        else:
            return core.ops.cumsum(x, 'axis', axis, 'flatten', flatten)

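    # Static graph: forward only the non-None locals (x, axis, flatten, ...) to
    # the generated `cumsum` layer so unspecified attributes keep op defaults.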
    check_type(x, 'x', (Variable), 'cumsum')
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    _cum_sum_ = generate_layer_fn('cumsum')
    return _cum_sum_(**kwargs)

def prod(x, axis=None, keepdim=False, dtype=None, name=None):
    """
    Compute the product of tensor elements over the given axis.

    Args:
        x(Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
        axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`, 
            multiply all elements of `x` and return a Tensor with a single element, 
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`, 
            the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
        dtype(str|np.dtype, optional): The desired data type of returned tensor, can be float32, float64, 
            int32, int64. If specified, the input tensor is casted to dtype before operator performed. 
            This is very useful for avoiding data type overflows. The default value is None, the dtype 
            of output is the same as input Tensor `x`.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result 
            tensor will have one fewer dimension than the input unless keepdim is true. Default is False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, result of product on the specified dim of input tensor.

    Raises:
        ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.
        TypeError: The type of :attr:`axis` must be int, list or tuple.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            # the axis is a int element
            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
                         [0.1, 0.2, 0.6, 0.7]]).astype(np.float32)
            x = paddle.to_tensor(data_x)
            out1 = paddle.prod(x)
            print(out1.numpy())
            # [0.0002268]

            out2 = paddle.prod(x, -1)
            print(out2.numpy())
            # [0.027  0.0084]

            out3 = paddle.prod(x, 0)
            print(out3.numpy())
            # [0.02 0.06 0.3  0.63]
            print(out3.numpy().dtype)
            # float32

            out4 = paddle.prod(x, 0, keepdim=True)
            print(out4.numpy())
            # [[0.02 0.06 0.3  0.63]]

            out5 = paddle.prod(x, 0, dtype='int64')
            print(out5.numpy())
            # [0 0 0 0]
            print(out5.numpy().dtype)
            # int64

            # the axis is list
            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
                               [[5.0, 6.0], [7.0, 8.0]]])
            y = paddle.to_tensor(data_y)
            out6 = paddle.prod(y, [0, 1])
            print(out6.numpy())
            # [105. 384.]

            out7 = paddle.prod(y, (1, 2))
            print(out7.numpy())
            # [  24. 1680.]

    """
    if dtype is not None:
        check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
        if x.dtype != convert_np_dtype_to_dtype_(dtype):
            x = layers.cast(x, dtype)

    return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)


def sign(x, name=None):
    """
    This OP returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.

    Args:
        x(Tensor): The input tensor. The data type can be float16, float32 or float64.
        name (str, optional): The default value is None. Normally there is no need for user to
            set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.

    Examples:
        .. code-block:: python

          import numpy as np
          import paddle

          data = np.array([3.0, 0.0, -2.0, 1.7], dtype='float32')
          paddle.disable_static()
          x = paddle.to_tensor(data)
          out = paddle.sign(x=x)
          print(out)  # [1.0, 0.0, -1.0, 1.0]
    """
    if in_dygraph_mode():
        return core.ops.sign(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
    helper = LayerHelper("sign", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})

    return out


def tanh(x, name=None):
    """
    Tanh Activation Operator.

    .. math::
        out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}

    Args:
        x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Output of Tanh operator, a Tensor with the same data type and shape as input.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x_data = np.array([-0.4, -0.2, 0.1, 0.3])
            x = paddle.to_tensor(x_data)
            out = paddle.tanh(x)
            print(out.numpy())
            # [-0.37994896 -0.19737532  0.09966799  0.29131261]
    """
    if in_dygraph_mode():
        return core.ops.tanh(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
    helper = LayerHelper('tanh', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
    return out