#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import (
    _current_expected_place,
    convert_np_dtype_to_dtype_,
    _non_static_mode,
    _varbase_creator,
    device_guard,
    _in_legacy_dygraph,
    in_dygraph_mode,
    _get_paddle_place,
)
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
    convert_dtype,
)
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of the Tensor to be created. The
            data type can be bool, float16, float32, float64, int8, int16, int32 or int64.
        name(string, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            The default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
        ],
        'create_tensor',
    )
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable
    )


def create_parameter(
    shape, dtype, name=None, attr=None, is_bias=False, default_initializer=None
):
    """
        :api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operators by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(
            item,
            'item of shape',
            (
                int,
                numpy.uint8,
                numpy.int8,
                numpy.int16,
                numpy.int32,
                numpy.int64,
            ),
            'create_parameter',
        )

    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
        ],
        'create_parameter',
    )
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(
        default_initializer,
        'default_initializer',
        (type(None), Initializer),
        'create_parameter',
    )

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
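    # helper.create_parameter registers the parameter in the main program and
    # appends its initializer op to the startup program.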
    return helper.create_parameter(
        attr, shape, convert_dtype(dtype), is_bias, default_initializer
    )


def create_global_var(
    shape, value, dtype, persistable=False, force_cpu=False, name=None
):
    """
    This function creates a new tensor variable with value in the global block (block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The newly created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(
        shape, 'shape', (list, tuple, numpy.ndarray), 'create_global_var'
    )
    for item in shape:
        check_type(
            item,
            'item of shape',
            (
                int,
                numpy.uint8,
                numpy.int8,
                numpy.int16,
                numpy.int32,
                numpy.int64,
            ),
            'create_global_var',
        )

    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'create_global_var',
    )

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True,
    )
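    # Attach a Constant initializer so the variable is filled with `value` at
    # startup; force_cpu pins the fill on the CPU.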
    helper.set_variable_initializer(
        var, initializer=Constant(value=float(value), force_cpu=force_cpu)
    )

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(
        x,
        'x',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient
    )
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
    )
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be a Tensor, or a list or tuple of Tensors, with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
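            # Drop empty (zero-size) tensors, which contribute nothing to the
            # concatenation result.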
            input = [t for t in input if t.shape.count(0) == 0]
        out = _C_ops.concat(input, axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat',
            )
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype,
            'axis',
            ['int32', 'int64'],
            'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor",
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformation the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, (
            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
            "number of the elements must be 1, but received %s." % len(input)
        )
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out], 'OutIndex': [out_index]},
            attrs={'axis': axis, 'use_stack': False},
        )
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
        attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
        )
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains the sizes of all the input tensors along the given axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list
        ), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import concat
        from ..dygraph import to_variable
        from paddle import stack

        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input)))
        )
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'tensor_array_to_tensor',
            )
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out], 'OutIndex': [out_index]},
        attrs={'axis': axis, 'use_stack': use_stack},
    )
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(
                input_section,
                "input",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'sums',
            )
    else:
        check_variable_and_dtype(
            input,
            "input",
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'sums',
        )

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums'
        )

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False},
    )
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (Variable, numpy.ndarray, list, tuple, float, int, bool),
        'assign',
    )
    is_inplace = True if output is not None else False

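    # Normalize Python scalars and lists/tuples to numpy arrays so the numpy
    # branch below handles them uniformly.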
    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be an input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False and would make this api return
    # None.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        else:
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'uint8',
                    'bool',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, numpy.ndarray):
        # [var, var, ...] is currently not supported.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32"
            )
            dtype = VarDesc.VarType.FP32
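        # assign_value stores the flattened data in a dtype-specific attribute
        # (e.g. fp32_values), so pick the attribute name and convert the values.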
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype)
            )
        if input.size > 1024 * 1024:
            raise ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
        if in_dygraph_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            _C_ops.assign_value_(
                output,
                list(input.shape),
                dtype,
                values,
                _current_expected_place(),
            )
        elif _in_legacy_dygraph():
            if output is None:
                output = core.VarBase()
            _legacy_C_ops.assign_value(
                output,
                'shape',
                list(input.shape),
                'dtype',
                dtype,
                value_name,
                values,
            )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of the operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
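    # Besides the numeric 'value', keep a 'str_value' attribute; the string
    # form preserves precision for values that do not round-trip through float.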
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if in_dygraph_mode():
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        if isinstance(shape, (list, tuple)):
            shape = utils.convert_shape_to_list(shape)

        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        if out is None:
            out = _C_ops.full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        if out is not None:
            # Final-state mode supports a non-None out.
            _C_ops.full_(out, shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'uint8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'fill_constant',
    )
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(
            out, 'out', [convert_dtype(dtype)], 'fill_constant'
        )

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
    )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True,
    )
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(
    input,
    shape,
    dtype,
    value,
    input_dim_idx=0,
    output_dim_idx=0,
    force_cpu=False,
):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the output is set to
    the batch size of the input. The stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor whose data type is float32, float64, int32 or int64.
        shape(list): The shape of the Tensor to be created; the Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the created Tensor, which
            can be float32, float64, int32 or int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is a LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch size of the input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor is set to
            the batch size of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
1079
        'force_cpu': force_cpu,
1080 1081 1082 1083 1084
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
1085 1086 1087 1088 1089 1090
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs,
    )
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
        :alias_main: paddle.argmin
        :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
        :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin',
    )
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
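    # The returned indices are integral, so no gradient flows through them.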
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax',
    )
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
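    # As with argmin, the returned indices are integral and carry no gradient.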
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
        :alias_main: paddle.argsort
        :alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
        :old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns the sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to sort along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input,
        'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort',
    )
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True
    )
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True
    )
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out, 'Indices': ids},
        attrs={'axis': axis, 'descending': descending},
    )
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
        :alias_main: paddle.reverse
        :alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
        :old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:

            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]

            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:

            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0

            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports float32, float64, int32, int64 and uint8.
                      If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied on each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)

          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
    """
    check_variable_and_dtype(
        x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse'
    )
    check_type(axis, 'axis', (int, tuple, list, Variable), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    if in_dygraph_mode():
        return _C_ops.reverse(x, axis)
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable to a file.

    Args:
        x(Variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file already exists, a runtime
            error will be raised.
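
    Examples:
        A minimal usage sketch; the constant variable and the file path below
        are illustrative, not part of the original API docs.

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.tensor import save

            x = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
            save(x, file_path="./saved_x", overwrite=True)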
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"X": x},
        outputs={},
        attrs={"file_path": file_path, "overwrite": overwrite},
    )


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file already exists, a runtime
            error will be raised.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"X": x},
        outputs={},
        attrs={"file_path": file_path, "overwrite": overwrite},
    )


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
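
    Examples:
        A minimal usage sketch; the variable names and the file path are
        illustrative. It reads variables previously written by
        ``save_combine`` back from a single file.

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.tensor import load_combine

            v1 = fluid.layers.create_tensor(dtype="float32", name="data_1")
            v2 = fluid.layers.create_tensor(dtype="float32", name="data_2")
            load_combine([v1, v2], file_path="output")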
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path},
    )


def has_inf(x):
    """
    Test if the Tensor x contains any infinity number.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: A Tensor holding a single bool value, indicating whether there is any infinity number in x or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    if _non_static_mode():
        return _legacy_C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is the start of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is the end of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is the number of values in the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: A 1-D tensor of data type :attr:`dtype`, holding :attr:`num` evenly spaced values, \
        so the data shape of this tensor is :math:`[num]` . If :attr:`num` is set to 1, the output tensor \
        only contains the value of input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]

    """
    if dtype is None:
        dtype = 'float32'
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
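    # Wrap Python scalars as 1-element tensors below; device_guard("cpu")
    # keeps these small Start/Stop/Num inputs in host memory.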
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if in_dygraph_mode():
        return _C_ops.linspace(
            tensor_start,
            tensor_stop,
            tensor_num,
            dtype,
            _current_expected_place(),
        )
    if _in_legacy_dygraph():
        return _legacy_C_ops.linspace(
            tensor_start, tensor_stop, tensor_num, 'dtype', dtype
        )
    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(
            start.dtype,
            'start',
            ['float32', 'float64', 'int32', 'int64'],
            'linspace',
        )
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(
            stop.dtype,
            'stop',
            ['float32', 'float64', 'int32', 'int64'],
            'linspace',
        )
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(
        dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'linspace'
    )
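    # Reject dtype combinations where casting the start/stop values into the
    # requested output dtype could overflow or lose range (e.g. float64
    # endpoints with a float32 or int32 output).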
    if (
        (stop_dtype == "float64" or start_dtype == "float64")
        and out_dtype in ["float32", "int32"]
    ) or (
        (stop_dtype == "int64" or start_dtype == "int64")
        and out_dtype == "int32"
    ):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace.".format(
                start_dtype, stop_dtype, dtype
            )
        )

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num},
        attrs={'dtype': dtype},
        outputs={'Out': [out]},
    )
    if isinstance(num, int):
        out.desc.set_shape((num,))
    return out


def zeros_like(x, out=None):
    """
    This OP creates a tensor of all zeros with the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be same as input :attr:`x`. If is a tensor, the data type and shape
            need to be same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: An N-D tensor with the same shape as the input. If the input
            data type is bool, every element is False; otherwise every element
            is zero.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like'
    )
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out,
            "out",
            ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like',
        )
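    # fill_any_like creates a tensor shaped like X and fills it with a
    # constant; value=0 together with x's dtype yields the zeros_like result.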
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 0, "dtype": x.dtype},
        outputs={'Out': [out]},
    )
    out.stop_gradient = True
    return out


@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
        :alias_main: paddle.diag
        :alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
        :old_api: paddle.fluid.layers.diag

    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix whose diagonal values are \
            specified by input :attr:`diagonal`. The output shape is :math:`[N, N]` and the \
            output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(
        diagonal.dtype,
        'diagonal',
        ['float32', 'float64', 'int32', 'int64'],
        'diag',
    )
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]}
    )

    out.stop_gradient = True
    return out


def eye(
    num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None
):
    """
    This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    def _check_attr(attr, message):
        if isinstance(attr, (Variable, core.VarBase, core.eager.Tensor)):
            assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
        elif not isinstance(attr, int) or attr < 0:
            raise TypeError("{} should be a non-negative int.".format(message))

    _check_attr(num_rows, "num_rows")
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        _check_attr(num_columns, "num_columns")
    else:
        num_columns = num_rows

    if in_dygraph_mode():
        out = _C_ops.eye(
            num_rows, num_columns, dtype, _current_expected_place()
        )
    elif _in_legacy_dygraph():
        out = _legacy_C_ops.eye(
            'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
        )
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(
            dtype,
            'dtype',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'eye',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype,
            },
            stop_gradient=True,
        )

    if batch_shape is not None:
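        # Broadcast the [num_rows, num_columns] identity to the requested
        # batch shape: reshape to [1, ..., 1, num_rows, num_columns], then
        # tile the leading singleton dimensions by batch_shape.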
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
        if _non_static_mode():
            out, _ = _legacy_C_ops.reshape2(out, None, 'shape', re_shape)
            return _legacy_C_ops.expand(out, None, 'expand_times', expand_times)

        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in batch_shape:
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")

        from .nn import expand
        from paddle import reshape

        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)

    out.stop_gradient = True
1894 1895 1896
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a tensor of all ones with the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like'
    )

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out,
            "out",
            ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like',
        )
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]},
    )
    return out


@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
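    """
    A deprecated alias that forwards to :func:`paddle.tensor.triu`, which
    returns the upper triangular part of ``input`` (elements below the
    ``diagonal``-th diagonal are set to 0).

    Examples:
        A minimal usage sketch; the input data is illustrative.

        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            x = fluid.layers.assign(np.arange(1, 10, dtype='int32').reshape(3, 3))
            out = fluid.layers.triu(x)
            # [[1, 2, 3],
            #  [0, 5, 6],
            #  [0, 0, 9]]
    """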
    import paddle

    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)