#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.common_ops_import import *
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type
from ..fluid.framework import in_dygraph_mode, _varbase_creator

from ..fluid.layers import transpose  #DEFINE_ALIAS

__all__ = [
    'matmul',
    'dot',
    #       'einsum',
    'norm',
    'transpose',
    'dist',
    't',
    'cross',
    'cholesky',
    #       'tensordot',
    'bmm',
    'histogram'
]


def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
    """
    Applies matrix multiplication to two tensors. `matmul` follows 
    the complete broadcast rules, 
    and its behavior is consistent with `np.matmul`.

    Currently, the input tensors can have any number of dimensions, and `matmul`
    can be used to achieve `dot`, `matmul` and `batchmatmul` behavior.

    The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
    flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:

    - If a transpose flag is specified, the last two dimensions of the tensor
      are transposed. If the tensor is 1-dimensional, the transpose is invalid.
      If the tensor is of shape :math:`[D]`, then for :math:`x` it is treated
      as :math:`[1, D]`, whereas for :math:`y` it is the opposite: it is
      treated as :math:`[D, 1]`.

    The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:

    - If both tensors are 1-dimensional, the dot product result is obtained.

    - If both tensors are 2-dimensional, the matrix-matrix product is obtained.

    - If `x` is 1-dimensional and `y` is 2-dimensional,
      a `1` is prepended to the dimensions of `x` in order to conduct the
      matrix multiply. After the matrix multiply, the prepended dimension
      is removed.

    - If `x` is 2-dimensional and `y` is 1-dimensional,
      the matrix-vector product is obtained.

    - If both arguments are at least 1-dimensional and at least one argument
      is N-dimensional (where N > 2), then a batched matrix multiply is obtained.
      If the first argument is 1-dimensional, a 1 is prepended to its
      dimensions in order to conduct the batched matrix multiply and removed after.
      If the second argument is 1-dimensional, a 1 is appended to its
      dimensions for the purpose of the batched matrix multiply and removed after.
      The non-matrix dimensions (excluding the last two) are broadcast
      according to the broadcast rule.
      For example, if the input is a (j, 1, n, m) tensor and the other is a
      (k, m, p) tensor, out will be a (j, k, n, p) tensor.

    Args:
        x (Tensor): The input Tensor.
        y (Tensor): The input Tensor.
        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
        name (str|None): A name for this layer (optional). If set to None, the layer
            will be named automatically.

    Returns:
        Tensor: The output Tensor.

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        paddle.disable_static()
        # vector * vector
        x_data = np.random.random([10]).astype(np.float32)
        y_data = np.random.random([10]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y)
        print(z.numpy().shape)
        # [1]

        # matrix * vector
        x_data = np.random.random([10, 5]).astype(np.float32)
        y_data = np.random.random([5]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y)
        print(z.numpy().shape)
        # [10]

        # batched matrix * broadcasted vector
        x_data = np.random.random([10, 5, 2]).astype(np.float32)
        y_data = np.random.random([2]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y)
        print(z.numpy().shape)
        # [10, 5]

        # batched matrix * batched matrix
        x_data = np.random.random([10, 5, 2]).astype(np.float32)
        y_data = np.random.random([10, 2, 5]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y)
        print(z.numpy().shape)
        # [10, 5, 5]

        # batched matrix * broadcasted matrix
        x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)
        y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y)
        print(z.numpy().shape)
        # [10, 3, 5, 5]
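
        # A hedged extra sketch (not in the original docs; assumes the same
        # API as above): the transpose flags transpose the last two
        # dimensions before the multiply, so transpose_y=True contracts
        # over the last axis of y.
        x_data = np.random.random([10, 3, 5]).astype(np.float32)
        y_data = np.random.random([10, 4, 5]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.matmul(x, y, transpose_y=True)
        print(z.numpy().shape)
        # [10, 3, 4]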

    """
    op_type = 'matmul_v2'
    if in_dygraph_mode():
        op = getattr(core.ops, op_type)
        return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)

    attrs = {
        'trans_x': transpose_x,
        'trans_y': transpose_y,
    }

    def __check_input(x, y):
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(val, name, ['float32', 'float64'],
                                     'matmul')

    __check_input(x, y)

    helper = LayerHelper('matmul_v2', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out


def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None):
    """
	:alias_main: paddle.norm
	:alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm

    Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
    or 2-norm, and in general the p-norm for p > 0) of a given tensor.

    Args:
        input (Variable): The input tensor could be N-D tensor, and the input data
            type could be float32 or float64.
        p (float|string, optional): Order of the norm. Supported values are `fro`, `1`, `2`,
            and any positive real number yielding the corresponding p-norm.
        axis (int|list, optional): The axis on which to apply norm operation. If axis is int
            or list with only one element, the vector norm is computed over the axis.
            If axis is a list with two elements, the matrix norm is computed over the axis.
            If `axis < 0`, the dimension to norm operation is rank(input) + axis.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have fewer dimensions
            than the :attr:`input` unless :attr:`keepdim` is true. Default
            value is False.
        out (Variable, optional): The output tensor, default value is None. Its data
            type must be the same as the input Tensor's.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor, the result of the norm operation on the specified axis of
        the input tensor; its data type is the same as that of the input Tensor.

    Raises:
        TypeError: if the out data type is different from the input data type.
        ValueError: if `p` or `axis` is invalid.
    
    Examples:
        .. code-block:: python
            
            import paddle
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2, 3, 5], dtype='float64')
            
            # compute frobenius norm along last two dimensions.
            out_fro = paddle.norm(x, p='fro', axis=[1,2])
            
            # compute 2-order vector norm along last dimension.
            out_pnorm = paddle.norm(x, p=2, axis=-1)
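
            # a hedged extra sketch (same API as above): 1-order vector norm
            # along axis 0, keeping the reduced dimension in the output
            out_1norm = paddle.norm(x, p=1, axis=0, keepdim=True)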
    """

    def frobenius_norm(input, dim=None, keepdim=False, out=None, name=None):
        """
        The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          dim (list, optional): None for last two dimensions.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
          out (Variable, optional): The tensor variable storing the output.
        """
        if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
            raise ValueError(
                "The dim of the frobenius norm op should be None or a list of two elements!"
            )
        attrs = {
            'dim': dim if dim is not None else [-2, -1],
            'keep_dim': keepdim,
            'reduce_all': False
        }
        if len(attrs['dim']) == len(input.shape):
            attrs['reduce_all'] = True
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'frobenius_norm')

        helper = LayerHelper('frobenius_norm', **locals())
        if out is None:
            out = helper.create_variable_for_type_inference(
                dtype=helper.input_dtype())
        else:
            check_type(out, 'out', (Variable), 'frobenius_norm')
            check_dtype(
                out.dtype, out.name,
                convert_dtype(input.dtype), 'frobenius_norm',
                '(The out data type in frobenius_norm must be the same with input data type.)'
            )

        helper.append_op(
            type='frobenius_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out

    def vector_norm(input,
                    porder=None,
                    axis=None,
                    keepdim=False,
                    out=None,
                    name=None):
        """
        Calculate the p-order vector norm for a certain dimension of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          porder (float, optional): None for porder=2.0.
          axis (int, optional): None for last dimension.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
          out (Variable, optional): The tensor variable storing the output.
        """
        if porder is not None:
            check_type(porder, 'porder', (float, int), 'p_norm')
        if axis is not None:
            check_type(axis, 'axis', (int), 'p_norm')
        attrs = {
            'axis': axis if axis is not None else -1,
            'porder': float(porder) if porder is not None else 2.0,
            'keepdim': keepdim,
            'epsilon': 1e-12,
        }
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'p_norm')

        helper = LayerHelper('p_norm', **locals())
        if out is None:
            out = helper.create_variable_for_type_inference(
                dtype=helper.input_dtype())
        else:
            check_type(out, 'out', (Variable), 'p_norm')
            check_dtype(
                out.dtype, out.name,
                convert_dtype(input.dtype), 'p_norm',
                '(The out data type in p_norm must be the same with input data type.)'
            )

        helper.append_op(
            type='p_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out

    if axis is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return frobenius_norm(
                    input, dim=axis, keepdim=keepdim, out=out, name=name)
            else:
                raise ValueError(
                    "only valid string values are 'fro', found {}".format(p))
        elif isinstance(p, (int, float)):
            return vector_norm(
                input, porder=p, axis=axis, keepdim=keepdim, out=out, name=name)
        else:
            raise ValueError("only valid p type is string or float, found {}".
                             format(type(p)))

    if isinstance(axis, list) and len(axis) == 1:
        axis = axis[0]

    # calculate vector norm, where axis is an int or a list with one integer
    if isinstance(axis, int):
        if isinstance(p, (int, float)):
            return vector_norm(
                input, axis=axis, porder=p, keepdim=keepdim, out=out, name=name)
        else:
            raise ValueError(
                "unsupported p for p-order vector norm. Expected float, found {}".
                format(p))
    # calculate matrix norm, where axis is a list with two integers
    elif isinstance(axis, list) and len(axis) == 2:
        if p == "fro":
            return frobenius_norm(
                input, dim=axis, keepdim=keepdim, out=out, name=name)
        else:
            raise ValueError(
                "unsupported p for matrix norm, expected 'fro', found {}".format(p))
    else:
        raise ValueError(
            "expected axis type int or list (length of list <= 2), found {}".
            format(axis))


def dist(x, y, p=2):
    """
	:alias_main: paddle.dist
	:alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist

    This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
    of distance. The shapes of x and y must be broadcastable. The definition is as follows;
    for details, please refer to `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:

    - Each input has at least one dimension.
    - Match the two input dimensions from back to front; each pair of dimension sizes must either be equal, have one of them be 1, or have one of them not exist.

    Where z = x - y, and the shapes of x and y are broadcastable, the shape of z can be
    obtained as follows:

    1. If x and y do not have the same number of dimensions, prepend 1 to the dimensions of the
    tensor with fewer dimensions.

    For example, if the shape of x is [8, 1, 6, 1] and the shape of y is [7, 1, 5], prepend 1 to
    the dimensions of y:

    x (4-D Tensor):  8 x 1 x 6 x 1

    y (4-D Tensor):  1 x 7 x 1 x 5

    2. Determine the size of each dimension of the output z: choose the maximum value from the
    two input dimensions.

    z (4-D Tensor):  8 x 7 x 6 x 5

    If the number of dimensions of the two inputs is the same, the size of the output can be
    directly determined in step 2. When p takes different values, the norm formula is as follows:

    When p = 0, defining :math:`0^0=0`, the zero-norm of z is simply the number of non-zero elements of z.

    .. math::

        ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}

    When p = inf, the inf-norm of z is the maximum element of z.

    .. math::

        ||z||_\infty=\max_i |z_i|

    When p = -inf, the negative-inf-norm of z is the minimum element of z.

    .. math::

        ||z||_{-\infty}=\min_i |z_i|

    Otherwise, the p-norm of z follows the formula,

    .. math::

        ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}}

    Args:
        x (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
        y (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
        p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.

    Returns:
        Variable: Tensor that is the p-norm of (x - y).

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32))
                y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32))
                out = paddle.dist(x, y, 0)
                print(out.numpy()) # out = [1.]

                out = paddle.dist(x, y, 2)
                print(out.numpy()) # out = [2.]

                out = paddle.dist(x, y, float("inf"))
                print(out.numpy()) # out = [2.]

                out = paddle.dist(x, y, float("-inf"))
                print(out.numpy()) # out = [0.]
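
                # a hedged extra case (same API as above): the 1-norm of (x - y)
                out = paddle.dist(x, y, 1)
                print(out.numpy()) # out = [2.]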
    """
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
    check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
    check_type(p, 'p', (float, int), 'dist')
    helper = LayerHelper("dist", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)

    inputs = {"X": [x], "Y": [y]}
    outputs = {'Out': [out]}
    attrs = {"p": float(p)}
    helper.append_op(type='dist', inputs=inputs, outputs=outputs, attrs=attrs)
    return out


def dot(x, y, name=None):
    """
    This operator calculates the inner product of vectors.
   
    .. note::
       Supports 1-D and 2-D Tensors. When the input is 2-D, the first dimension
       is the batch dimension, which means that the vectors of multiple batches are dotted.

    Parameters:
        x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        y(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`

    Returns:
        Variable: the calculated result Tensor.

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        paddle.disable_static()
        x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
        y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.dot(x, y)
        print(z.numpy())
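
        # a hedged sketch of the 2-D (batched) case described in the note
        # above, assuming the same API: each row of x2 is dotted with the
        # corresponding row of y2
        x2 = paddle.to_tensor(np.random.uniform(0.1, 1, [10, 3]).astype(np.float32))
        y2 = paddle.to_tensor(np.random.uniform(1, 3, [10, 3]).astype(np.float32))
        z2 = paddle.dot(x2, y2)
        print(z2.numpy())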

    """
    op_type = 'dot'
    # skip var type check in dygraph mode to improve efficiency
    if in_dygraph_mode():
        op = getattr(core.ops, op_type)
        return op(x, y)

    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)

    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             op_type)

    helper = LayerHelper(op_type, **locals())
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="dot", inputs={'X': x,
                            'Y': y}, attrs={}, outputs={"Out": out})
    return out


def t(input, name=None):
    """
	:alias_main: paddle.t
	:alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t

    Transpose a tensor of at most 2 dimensions.
    0-D and 1-D tensors are returned as-is, and for a 2-D tensor this is
    equivalent to the fluid.layers.transpose function with perm set to [1, 0].
    
    Args:
        input (Variable): The input Tensor. It is an N-D (N<=2) Tensor of data types float16, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
    Returns:
        Variable: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.
    
    For Example:
        .. code-block:: text

            # Example 1 (0-D tensor)
            x = tensor([0.79])
            paddle.t(x) = tensor([0.79])

            # Example 2 (1-D tensor)
            x = tensor([0.79, 0.84, 0.32])
            paddle.t(x) = tensor([0.79, 0.84, 0.32])

            # Example 3 (2-D tensor)
            x = tensor([0.79, 0.84, 0.32],
                       [0.64, 0.14, 0.57])
            paddle.t(x) = tensor([0.79, 0.64],
                                 [0.84, 0.14],
                                 [0.32, 0.57])

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2, 3],
                           dtype='float32')
            x_transposed = paddle.t(x)
            print(x_transposed.shape)
            # (3, 2)
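
            # A hedged dygraph sketch (not in the original docs; assumes the
            # paddle.disable_static / paddle.to_tensor API used elsewhere in
            # this module). A 1-D tensor is returned as-is:
            import numpy as np
            paddle.disable_static()
            v = paddle.to_tensor(np.array([0.79, 0.84, 0.32]).astype('float32'))
            print(paddle.t(v).numpy())
            # [0.79 0.84 0.32]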
    """
    if len(input.shape) > 2:
        raise ValueError(
            "Input(input) only support N-D (N<=2) tensor, but received "
            "length of Input(input) is %s. Perhaps you can use paddle."
            "tensor.transpose() instead." % len(input.shape))
    if in_dygraph_mode():
        if len(input.shape) == 1:
            return input
        # 2-D tensor
        perm = [1, 0]
        out, _ = core.ops.transpose2(input, 'axis', perm)
        return out

    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'transpose')

    helper = LayerHelper('t', **locals())
    out = helper.create_variable_for_type_inference(input.dtype)
    input_shape = helper.create_variable_for_type_inference(input.dtype)
    if len(input.shape) == 1:
        out = input
    else:
        helper.append_op(
            type='transpose2',
            inputs={'X': [input]},
            outputs={'Out': [out],
                     'XShape': [input_shape]},
            attrs={'axis': [1, 0]})
    return out


def cross(x, y, axis=None, name=None):
    """
	:alias_main: paddle.cross
	:alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross

    Computes the cross product between two tensors along an axis.
    The inputs must have the same shape, and the size of the axis over which the product is computed must be 3.
    If `axis` is not given, it defaults to the first axis found with size 3.
    
    Args:
        x (Variable): The first input tensor variable.
        y (Variable): The second input tensor variable.
        axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: A Tensor with same data type as `x`.
        
    Examples:
        .. code-block:: python

            import paddle
            from paddle import to_variable
            import numpy as np

            paddle.disable_static()

            data_x = np.array([[1.0, 1.0, 1.0],
                               [2.0, 2.0, 2.0],
                               [3.0, 3.0, 3.0]])
            data_y = np.array([[1.0, 1.0, 1.0],
                               [1.0, 1.0, 1.0],
                               [1.0, 1.0, 1.0]])
            x = to_variable(data_x)
            y = to_variable(data_y)

            z1 = paddle.cross(x, y)
            print(z1.numpy())
            # [[-1. -1. -1.]
            #  [ 2.  2.  2.]
            #  [-1. -1. -1.]]

            z2 = paddle.cross(x, y, axis=1)
            print(z2.numpy())
            # [[0. 0. 0.]
            #  [0. 0. 0.]
            #  [0. 0. 0.]]
    """
    if in_dygraph_mode():
        if axis is not None:
            return core.ops.cross(x, y, 'dim', axis)
        else:
            return core.ops.cross(x, y)

    helper = LayerHelper("cross", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    attrs = dict()
    attrs['dim'] = axis

    helper.append_op(
        type='cross',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out


def cholesky(x, upper=False, name=None):
    """
    Computes the Cholesky decomposition of one symmetric positive-definite
    matrix or batches of symmetric positive-definite matrices.
    
    If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,
    and the returned matrix :math:`U` is upper-triangular. Otherwise, the
    decomposition has the form  :math:`A = LL^{T}` , and the returned matrix
    :math:`L` is lower-triangular.

    Args:
        x (Variable): The input tensor. Its shape should be `[*, M, M]`,
            where * is zero or more batch dimensions, and matrices on the
            inner-most 2 dimensions all should be symmetric positive-definite.
            Its data type should be float32 or float64.
        upper (bool): The flag indicating whether to return upper or lower
            triangular matrices. Default: False.

    Returns:
        Variable: A Tensor with same shape and data type as `x`. It represents \
            triangular matrices generated by Cholesky decomposition.
        
    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            a = np.random.rand(3, 3)
            a_t = np.transpose(a, [1, 0])
            x_data = np.matmul(a, a_t) + 1e-03
            x = paddle.to_variable(x_data)
            out = paddle.cholesky(x, upper=False)
            print(out.numpy())
            # [[1.190523   0.         0.        ]
            #  [0.9906703  0.27676893 0.        ]
            #  [1.25450498 0.05600871 0.06400121]]
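
            # a hedged variant (same API assumed): request the upper-triangular
            # factor U instead, so that x = transpose(U) @ U
            out_upper = paddle.cholesky(x, upper=True)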

    """
    if in_dygraph_mode():
        return core.ops.cholesky(x, "upper", upper)
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
    check_type(upper, 'upper', bool, 'cholesky')
    helper = LayerHelper('cholesky', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='cholesky',
        inputs={'X': [x]},
        outputs={'Out': out},
        attrs={'upper': upper})
    return out


def bmm(x, y, name=None):
    """
	:alias_main: paddle.bmm
	:alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm

    Applies batched matrix multiplication to two tensors.

    Both input tensors must be three-dimensional and share the same batch size.

    If x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            # In imperative mode:
            # size input1: (2, 2, 3) and input2: (2, 3, 2)
            input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
            input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])

            paddle.disable_static()

            x = paddle.to_variable(input1)
            y = paddle.to_variable(input2)
            out = paddle.bmm(x, y)
            #output size: (2, 2, 2)
            #output value:
            #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
            out_np = out.numpy()
    """
    x_shape = x.shape
    y_shape = y.shape
    if not len(x_shape) == len(y_shape) == 3:
        raise ValueError(
            "x and y should be 3-dimensional. But received x's dimension: {}, y's dimension: {}".
            format(x_shape, y_shape))
    if x_shape[2] != y_shape[1]:
        raise ValueError(
            "x's width must be equal to y's height. But received x's shape: {}, y's shape: {}".
            format(x_shape, y_shape))
    if in_dygraph_mode():
        return core.ops.bmm(x, y)
    helper = LayerHelper('bmm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out


def histogram(input, bins=100, min=0, max=0):
    """
    Computes the histogram of a tensor. The elements are sorted into equal-width bins between min and max.
    If min and max are both zero, the minimum and maximum values of the data are used.

    Args:
        input (Variable): A Tensor (or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]`. The data type of the input Tensor
            should be float32, float64, int32 or int64.
        bins (int): number of histogram bins
        min (int): lower end of the range (inclusive)
        max (int): upper end of the range (inclusive)

    Returns:
        Variable: Tensor or LoDTensor calculated by histogram layer. The data type is int64.

    Code Example 1:
        .. code-block:: python

            import paddle
            import numpy as np
            startup_program = paddle.static.Program()
            train_program = paddle.static.Program()
            with paddle.static.program_guard(train_program, startup_program):
                inputs = paddle.data(name='input', dtype='int32', shape=[2,3])
                output = paddle.histogram(inputs, bins=5, min=1, max=5)
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                exe.run(startup_program)
                img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
                res = exe.run(train_program,
                              feed={'input': img},
                              fetch_list=[output])
                print(np.array(res[0])) # [0,3,0,2,1]

    Code Example 2:
        .. code-block:: python
            import paddle
            import numpy as np
            paddle.disable_static(paddle.CPUPlace())
            inputs_np = np.array([1, 2, 1]).astype(np.float)
            inputs = paddle.to_variable(inputs_np)
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result) # [0, 2, 1, 0]
            paddle.enable_static()
    """
    if in_dygraph_mode():
        return core.ops.histogram(input, "bins", bins, "min", min, "max", max)

    helper = LayerHelper('histogram', **locals())
    check_variable_and_dtype(
        input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='histogram',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'bins': bins,
               'min': min,
               'max': max})
    return out