#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode, _get_paddle_place
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            The default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(name=helper.name,
                                  dtype=dtype,
                                  persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
	:api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create an
    operator by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually the name does not need to be set and it is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
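
            # A bias parameter created via is_bias=True (an illustrative sketch,
            # not part of the original example; `b` is a name introduced here):
            b = paddle.static.create_parameter(shape=[200], dtype='float32', is_bias=True)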
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape, convert_dtype(dtype), is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually the name does not need to be set and it is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')

    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(dtype=dtype,
                                        shape=shape,
                                        persistable=persistable,
                                        name=name,
                                        stop_gradient=True)
    helper.set_variable_initializer(var,
                                    initializer=Constant(value=float(value),
                                                         force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
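
            # A minimal check (illustrative sketch, not part of the original
            # example): the cast keeps the values and only changes the dtype.
            print(y.dtype)   # paddle.uint8
            print(y.numpy()) # [2 3 4]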
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.final_state_cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(x, 'x', [
        'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
        'uint8', 'uint16'
    ], 'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8', 'uint16'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient)
    helper.append_op(type='cast',
                     inputs={'X': [x]},
                     outputs={'Out': [out]},
                     attrs={
                         'in_dtype': x.dtype,
                         'out_dtype': out.dtype
                     })
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type. 
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
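
                # The axis can also be given as a 1-D int32 Tensor (an
                # illustrative sketch; `axis_t` is a name introduced here):
                axis_t = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
                out3 = fluid.layers.concat(input=[x1, x2, x3], axis=axis_t)
                # out3 holds the same values as out1 above.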
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x, 'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat')
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype, 'axis', ['int32', 'int64'], 'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor"
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(type='tensor_array_to_tensor',
                         inputs={'X': input[0]},
                         outputs={
                             'Out': [out],
                             'OutIndex': [out_index]
                         },
                         attrs={
                             'axis': axis,
                             'use_stack': False
                         })
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(type='concat',
                         inputs=inputs,
                         outputs={'Out': [out]},
                         attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in attr::`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
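
            # Stack mode (an illustrative sketch mirroring Case 2 above): with
            # use_stack=True the tensors are stacked along `axis` instead of
            # concatenated, so they must all share the same shape.
            stacked, stacked_index = fluid.layers.tensor_array_to_tensor(
                input=array, axis=0, use_stack=True)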
    """
    if _non_static_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(type='tensor_array_to_tensor',
                     inputs={'X': input},
                     outputs={
                         'Out': [out],
                         'OutIndex': [out_index]
                     },
                     attrs={
                         'axis': axis,
                         'use_stack': use_stack
                     })
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        check_variable_and_dtype(out, "out",
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'sums')

    helper.append_op(type='sum',
                     inputs={'X': input},
                     outputs={'Out': out},
                     attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input',
               (Variable, numpy.ndarray, list, tuple, float, int, bool),
               'assign')
    is_inplace = True if output is not None else False

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.final_state_assign(input)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _C_ops.assign(input, output)
        else:
            check_dtype(input.dtype, 'input', [
                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
                'uint8', 'bool'
            ], 'assign', '(When the type of input in assign is Variable.)')
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype)
            helper.append_op(type='assign',
                             inputs={'X': [input]},
                             outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32")
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(dtype=dtype)
        if _non_static_mode():
            _C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
                                dtype, value_name, values)
        else:
            helper.append_op(type='assign_value',
                             outputs={'Out': [output]},
                             attrs={
                                 'dtype': dtype,
                                 'shape': list(input.shape),
                                 value_name: values
                             })

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize 
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created 
            Tensor that meets the requirements to store the result of operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain  Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
          
          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if _non_static_mode():
        if out is None and in_dygraph_mode():
            # Currently, final state mode doesn't support out=None.
            place = _current_expected_place()
            if force_cpu:
                place = core.CPUPlace()
            if isinstance(shape, (list, tuple)):
                for item in shape:
                    if not isinstance(item, Variable):
                        shape = list(
                            map(
                                lambda x: x.numpy().flat[0]
                                if isinstance(x, Variable) else x, shape))
                        break

            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
            out = _C_ops.final_state_full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        else:
            shape = utils.convert_shape_to_list(shape)
            if out is None:
                out = _varbase_creator(dtype=dtype)

            if isinstance(value, Variable):
                if dtype in ['uint8', 'int16', 'int32', 'int64']:
                    attrs['str_value'] = str(int(value.numpy().item(0)))
                else:
                    attrs['str_value'] = str(float(value.numpy().item(0)))

            _C_ops.fill_constant(out, 'value', float(value), 'force_cpu',
                                 force_cpu, 'dtype', out.dtype, 'str_value',
                                 attrs['str_value'], 'shape', shape)
            out.stop_gradient = True
            return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32',
        'int64', 'complex64', 'complex128'
    ], 'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(inputs=inputs,
                                  attrs=attrs,
                                  shape=shape,
                                  op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(type='fill_constant',
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs,
                     stop_gradient=True)
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the created Tensor is
    set to the batch size of the input. The stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
            the value of batch_size of input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.final_state_full_batch_size_like(input, shape, dtype,
                                                      value, input_dim_idx,
                                                      output_dim_idx, place)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(type='fill_constant_batch_size_like',
                     inputs={'Input': input},
                     outputs={'Out': [out]},
                     attrs=attrs)
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
	:alias_main: paddle.argmin
	:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
	:old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(type='arg_min',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(type='arg_max',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
	:alias_main: paddle.argsort
	:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
	:old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
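
                # Sorting in descending order (an illustrative sketch, not part
                # of the original example):
                out4 = fluid.layers.argsort(input=x, axis=-1, descending=True)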
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype,
                                                    stop_gradient=True)
    ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64,
                                                    stop_gradient=True)
    helper.append_op(type='argsort',
                     inputs={'X': input},
                     outputs={
                         'Out': out,
                         'Indices': ids
                     },
                     attrs={
                         'axis': axis,
                         'descending': descending
                     })
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
	:alias_main: paddle.reverse
	:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
	:old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:

            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]

            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:

            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0

            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
                      If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be apply on each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

1252
          import paddle.fluid as fluid
1253 1254 1255 1256
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
1257 1258 1259 1260 1261 1262 1263 1264 1265 1266

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)

          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
F
fengjiayi 已提交
1267
    """
    check_variable_and_dtype(x, 'x',
                             ('float32', 'float64', 'int32', 'int64', 'uint8'),
                             'reverse')
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='reverse',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If it is set to 'False' and the file exists, a runtime
            error will be raised.
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(type="save",
                     inputs={"input": x},
                     outputs={},
                     attrs={
                         "file_path": file_path,
                         "overwrite": overwrite
                     })


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where the variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If it is set to 'False' and the file exists, a runtime
            error will be raised.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(type="save_combine",
                     inputs={"input": x},
                     outputs={},
                     attrs={
                         "file_path": file_path,
                         "overwrite": overwrite
                     })


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
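
    Examples:
        .. code-block:: python

            # a minimal sketch of pairing load_combine with save_combine,
            # assuming "./output" was produced earlier by save_combine; the
            # actual load happens when the enclosing program is executed
            import paddle.fluid as fluid
            v1 = fluid.layers.create_tensor(dtype="float32")
            v2 = fluid.layers.create_tensor(dtype="float32")
            fluid.layers.load_combine([v1, v2], file_path="./output")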
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(type="load_combine",
                     inputs={},
                     outputs={"Out": out},
                     attrs={"file_path": file_path})


def has_inf(x):
    """
    Test whether x contains any infinity number.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor storing the output, a single bool value indicating whether there is any infinity number in x or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test whether x contains any NAN value.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor variable storing the output, a single bool value indicating whether there is any NAN in x or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[2, 3], dtype="float32")
          res = paddle.fluid.layers.has_nan(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isnan(x)

    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """

    Test whether x contains any infinity/NAN value. If all the elements are finite,
    this returns True, otherwise False.

    Args:
        x(Tensor): The Tensor to be checked.

    Returns:
        Tensor: The tensor storing the output, contains a bool value.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[4, 6], dtype='float32')
            y = paddle.fluid.layers.isfinite(x)
            print(y)
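            # y contains a single bool: True here, since paddle.rand only
            # produces finite values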

    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype, name=None):
    """
    This OP returns a 1-D Tensor with evenly spaced values within a given interval.

    Values are generated within the half-open interval [``start``, ``end``) with
    the spacing ``step`` (the interval includes ``start`` but excludes ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
            with data type int32, int64, float32, float64.
        end(float|int|Tensor): End of interval. The interval does not include
            this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
            [1], with data type int32, int64, float32, float64.
        step(float|int|Tensor): Spacing between values. For any out, it is
            the distance between two adjacent values, out[i+1] - out[i]. If
            ``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
            type int32, int64, float32, float64.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns: 
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Raises:
        TypeError: If ``dtype`` is not int32, int64, float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            out1 = fluid.layers.range(0, 10, 2, 'int32')
            # [0, 2, 4, 6, 8]

            start_var = fluid.layers.fill_constant([1], 'int64', 3)
            out2 = fluid.layers.range(start_var, 7, 1, 'int64')
            # [3, 4, 5, 6]
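
            # For float dtypes, rounding can make the last value land just
            # short of or beyond ``end``; adding a small epsilon to ``end``
            # keeps the endpoint stable (a usage sketch, values assumed)
            out3 = fluid.layers.range(0.0, 1.0 + 1e-6, 0.25, 'float32')
            # [0., 0.25, 0.5, 0.75, 1.]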

    """
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = cast(start, dtype)

    if not isinstance(end, Variable):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = cast(end, dtype)

    if not isinstance(step, Variable):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = cast(step, dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())

    if _in_legacy_dygraph():
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out

    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
    helper.append_op(type='range',
                     inputs={
                         'Start': start,
                         'End': end,
                         'Step': step
                     },
                     outputs={'Out': out})
    out.stop_gradient = True
    if out_shape is not None:
        out.desc.set_shape(out_shape)
    return out


def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is the start of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is the end of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is the given number of values in the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of the output tensor, it could be
            int32, int64, float32 or float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: A 1-D tensor of data type float32 or float64 with the fixed number of evenly spaced values, \
        the data shape of this tensor is :math:`[num]` . If :attr:`num` is set to 1, the output tensor just has \
        the value of the input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]

    """
    if dtype is None:
        dtype = 'float32'
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if _in_legacy_dygraph():
        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                               dtype)
    if in_dygraph_mode():
        return _C_ops.final_state_linspace(tensor_start, tensor_stop,
                                           tensor_num, dtype)
    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(start.dtype, 'start',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(stop.dtype, 'stop',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
                'linspace')
    if ((stop_dtype == "float64" or start_dtype == "float64")
            and out_dtype in ["float32", "int32"]) or (
                (stop_dtype == "int64" or start_dtype == "int64")
                and out_dtype == "int32"):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace."
            .format(start_dtype, stop_dtype, dtype))

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(type='linspace',
                     inputs={
                         'Start': tensor_start,
                         'Stop': tensor_stop,
                         'Num': tensor_num
                     },
                     attrs={'dtype': dtype},
                     outputs={'Out': [out]})
    if isinstance(num, int):
        out.desc.set_shape((num, ))
    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has identical shape and dtype
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If it is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be the same as input :attr:`x`. If it is a tensor, the data type and shape
            need to be the same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor, the element in the tensor is related to the input
            data type, if the input data type is bool, the output value is
            False, otherwise it is zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    check_variable_and_dtype(x, "x",
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')

    helper.append_op(type='fill_zeros_like',
                     inputs={'X': [x]},
                     outputs={'Out': [out]})
    out.stop_gradient = True
    return out


@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
	:alias_main: paddle.diag
	:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
	:old_api: paddle.fluid.layers.diag

    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input should be a 1D tensor of shape :math:`[N]` , \
            specifying the diagonal values. The input data type should be float32, float64, int32 or int64.

    Returns:
        Variable: The tensor variable storing the square matrix with the diagonal values \
            specified by input :attr:`diagonal`. The output shape is :math:`[N, N]` with two dims, \
            and the output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(type='diag',
                     inputs={'Diagonal': [diagonal]},
                     outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype='float32',
        name=None):
    """
    This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows

    if in_dygraph_mode():
        out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
                                     _current_expected_place())
    elif _in_legacy_dygraph():
        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                         num_columns)
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(dtype, 'dtype',
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
        if not isinstance(num_rows, int) or num_rows < 0:
            raise TypeError("num_rows should be a non-negative int")
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(type='eye',
                         inputs={},
                         outputs={'Out': [out]},
                         attrs={
                             'num_rows': num_rows,
                             'num_columns': num_columns,
                             'dtype': dtype
                         },
                         stop_gradient=True)

    if batch_shape is not None:
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
        if _non_static_mode():
            out = _C_ops.reshape(out, 'shape', re_shape)
            return _C_ops.expand(out, None, 'expand_times', expand_times)

        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in (batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")

        from .nn import reshape, expand
        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)

    out.stop_gradient = True
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has identical shape and dtype 
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(x, "x",
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'ones_like')

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    helper.append_op(type='fill_any_like',
                     inputs={'X': [x]},
                     attrs={'value': 1.0},
                     outputs={'Out': [out]})
    return out


@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
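    """
    Returns the upper triangular part of ``input``: the elements on and above
    the ``diagonal``-th diagonal are kept, and the elements below it are set
    to 0. This wrapper is deprecated; use :code:`paddle.triu` instead.

    Examples:
        .. code-block:: python

            # a minimal usage sketch of the deprecated wrapper
            import paddle
            x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
            y = paddle.fluid.layers.triu(x)
            # [[1, 2, 3],
            #  [0, 5, 6],
            #  [0, 0, 9]]
    """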
    import paddle
    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)