#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode, _get_paddle_place
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
	:api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operators by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name does not need to be set and is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The newly created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name does not need to be set and is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')

    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.final_state_cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(x, 'x', [
        'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
        'uint8', 'uint16'
    ], 'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8', 'uint16'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type. 
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
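            # Keep only tensors without a zero-sized dimension; empty tensors add nothing to the result.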
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x, 'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat')
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type.")
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype, 'axis', ['int32', 'int64'], 'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor")

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in attr::`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums')

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray, list, tuple, float,
                                int, bool), 'assign')
    is_inplace = True if output is not None else False

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.final_state_assign(input)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _C_ops.assign(input, output)
        else:
            check_dtype(input.dtype, 'input', [
                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
                'uint8', 'bool'
            ], 'assign', '(When the type of input in assign is Variable.)')
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype)
            helper.append_op(
                type='assign', inputs={'X': [input]},
                outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32")
            dtype = VarDesc.VarType.FP32
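        # Map the numpy dtype to the matching assign_value attribute and flatten the data row-major.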
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    if is_inplace and _non_static_mode():
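        # Record the in-place write so the gradient machinery can detect that `output` was modified.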
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize 
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created 
            Tensor that meets the requirements to store the result of operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain  Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
          
          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if _non_static_mode():
        if out is None and in_dygraph_mode():
            # Currently, the final-state kernel only handles the case where out is None.
            place = _current_expected_place()
            if force_cpu:
                place = core.CPUPlace()
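            # For a shape list that mixes ints and Tensors, pull the scalar value out of each Tensor
            # element so the final-state op receives plain Python ints.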
            if isinstance(shape, (list, tuple)):
                for item in shape:
                    if not isinstance(item, Variable):
                        shape = list(
                            map(lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x,
                                shape))
                        break

            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
            out = _C_ops.final_state_full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        else:
            shape = utils.convert_shape_to_list(shape)
            if out is None:
                out = _varbase_creator(dtype=dtype)

            if isinstance(value, Variable):
                if dtype in ['uint8', 'int16', 'int32', 'int64']:
                    attrs['str_value'] = str(int(value.numpy().item(0)))
                else:
                    attrs['str_value'] = str(float(value.numpy().item(0)))

            _C_ops.fill_constant(out, 'value',
                                 float(value), 'force_cpu', force_cpu, 'dtype',
                                 out.dtype, 'str_value', attrs['str_value'],
                                 'shape', shape)
            out.stop_gradient = True
            return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32',
        'int64', 'complex64', 'complex128'
    ], 'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the output is set to the
    batch_size of the input. The stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
            the value of batch_size of input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.final_state_full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
	:alias_main: paddle.argmin
	:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
	:old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the min elements of the input tensor's
    element along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the max elements of the input tensor's
    element along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
	:alias_main: paddle.argsort
	:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
	:old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
	:alias_main: paddle.reverse
	:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
	:old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:

            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]

            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:

            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0

            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
                      If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied to each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)

          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
    """
    check_variable_and_dtype(
        x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse')
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
X
Xin Pan 已提交
1258
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
F
fengjiayi 已提交
1259 1260
    helper.append_op(
        type='reverse',
W
Wu Yi 已提交
1261
        inputs={'X': x},
F
fengjiayi 已提交
1262 1263 1264 1265 1266
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(Variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it
            already exists. If it is set to 'False' and the file exists, a
            runtime error will be thrown.
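
    Examples:
        .. code-block:: python

            # An illustrative sketch (not from the original docs); it mirrors
            # the ``save_combine`` example below and the variable is a placeholder.
            import paddle.fluid as fluid

            v1 = fluid.layers.data(name="data", shape=(4, 6), dtype="float32")
            fluid.layers.save(v1, file_path="output")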
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it
            already exists. If it is set to 'False' and the file exists, a
            runtime error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
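
    Examples:
        .. code-block:: python

            # An illustrative sketch (not from the original docs); it assumes
            # the file "output" was previously written by ``save_combine``.
            import paddle.fluid as fluid

            v1 = fluid.layers.data(name="data_1", shape=(4, 6), dtype="float32")
            v2 = fluid.layers.data(name="data_2", shape=(6, 8, 4), dtype="float32")
            fluid.layers.load_combine([v1, v2], file_path="output")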
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of x is infinity.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: A Tensor holding a single bool value, indicating whether there is any infinity value in x or not.
    
    Examples:
        .. code-block:: python
          
          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of x is NaN.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: A Tensor holding a single bool value, indicating whether there is any NaN value in x or not.
    
    Examples:
        .. code-block:: python
    
          import paddle
          data = paddle.randn(shape=[2,3], dtype="float32")
          res = paddle.fluid.layers.has_nan(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isnan(x)

    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """

    Test if x contains any infinity/NaN values. If all the elements are finite,
    returns true, else false.

    Args:
        x(Tensor): The Tensor to be checked.

    Returns:
        Tensor: A Tensor holding a single bool value, which is True only if all the elements of x are finite.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[4, 6], dtype='float32')
            y = paddle.fluid.layers.isfinite(x)
            print(y)
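
            # An illustrative sketch (not from the original docs): a tensor
            # holding an explicit Inf; paddle.to_tensor is assumed available.
            x_inf = paddle.to_tensor([1.0, float('inf'), 2.0])
            y_inf = paddle.fluid.layers.isfinite(x_inf)
            print(y_inf) # [False], since not every element is finite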

    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())

    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype, name=None):
    """
    This OP returns a 1-D Tensor with evenly spaced values within a given interval.

    Values are generated within the half-open interval [``start``, ``end``)
    with the given ``step`` (the interval includes ``start`` but excludes ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
            with data type int32, int64, float32, float64.
        end(float|int|Tensor): End of interval. The interval does not include
            this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
            [1], with data type int32, int64, float32, float64.
        step(float|int|Tensor): Spacing between values. For any out, it is
            the distance between two adjacent values, out[i+1] - out[i]. If
            ``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
            type int32, int64, float32, float64.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns: 
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Raises:
        TypeError: If ``dtype`` is not int32, int64, float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            out1 = fluid.layers.range(0, 10, 2, 'int32')
            # [0, 2, 4, 6, 8]

            start_var = fluid.layers.fill_constant([1], 'int64', 3)
            out2 = fluid.layers.range(start_var, 7, 1, 'int64')
            # [3, 4, 5, 6]
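
            # An illustrative float sketch (not from the original docs): add a
            # small epsilon to ``end`` so rounding does not drop the last value.
            out3 = fluid.layers.range(0.0, 1.0 + 1e-6, 0.25, 'float32')
            # [0., 0.25, 0.5, 0.75, 1.]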

    """
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = cast(start, dtype)

    if not isinstance(end, Variable):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = cast(end, dtype)

    if not isinstance(step, Variable):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = cast(step, dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())

    if _in_legacy_dygraph():
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out

    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': out})
    out.stop_gradient = True
    if out_shape is not None:
        out.desc.set_shape(out_shape)
    return out


def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is the start of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is the end of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is the number of values in the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: A 1-D tensor with a fixed number of evenly spaced values, whose data type is set by :attr:`dtype` \
        and whose shape is :math:`[num]` . If :attr:`num` is set to 1, the output tensor just has \
        the value with input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]
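
             # An illustrative sketch (not from the original docs) with a Tensor
             # ``start``; paddle.full is assumed available (paddle >= 2.0).
             start = paddle.full(shape=[1], fill_value=0, dtype='float32')
             data = paddle.linspace(start, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]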

    """
    if dtype is None:
        dtype = 'float32'
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if _in_legacy_dygraph():
        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                               dtype)
    if in_dygraph_mode():
        return _C_ops.final_state_linspace(tensor_start, tensor_stop,
                                           tensor_num, dtype)
    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(start.dtype, 'start',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(stop.dtype, 'stop',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
                'linspace')
    if ((stop_dtype == "float64" or start_dtype == "float64") and
            out_dtype in ["float32", "int32"]) or ((stop_dtype == "int64" or
                                                    start_dtype == "int64") and
                                                   out_dtype == "int32"):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace."
            .format(start_dtype, stop_dtype, dtype))

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num},
        attrs={'dtype': dtype},
        outputs={'Out': [out]})
    if isinstance(num, int):
        out.desc.set_shape((num, ))
    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has identical shape and dtype
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be same as input :attr:`x`. If is a tensor, the data type and shape
            need to be same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor, the element in tensor is related to input
            data type, if the input data type is bool, the output value is
            False, otherwise is zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')

    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
	:alias_main: paddle.diag
	:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
	:old_api: paddle.fluid.layers.diag

    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix, with the diagonal values specified by input :attr:`diagonal`. \
            The output shape is :math:`[N, N]` with two dims. The output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype='float32',
        name=None):
    """
    This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows

    if in_dygraph_mode():
        out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
                                     _current_expected_place())
    elif _in_legacy_dygraph():
        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                         num_columns)
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(dtype, 'dtype',
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
        if not isinstance(num_rows, int) or num_rows < 0:
            raise TypeError("num_rows should be a non-negative int")
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype
            },
            stop_gradient=True)

    if batch_shape is not None:
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
        if _non_static_mode():
            out = _C_ops.reshape(out, 'shape', re_shape)
            return _C_ops.expand(out, None, 'expand_times', expand_times)

        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in (batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")

        from .nn import reshape, expand
        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)

    out.stop_gradient = True
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has identical shape and dtype 
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out


@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
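    """
    Return the upper triangular part of ``input`` (a 2-D tensor or a batch of
    2-D tensors), with the elements below the ``diagonal``-th diagonal set to
    zero. This is a deprecated alias of :func:`paddle.triu`.

    Examples:
        .. code-block:: python

            # An illustrative sketch (not from the original docs);
            # paddle.to_tensor is assumed available (paddle >= 2.0).
            import paddle
            x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='float32')
            y = paddle.fluid.layers.triu(x)
            # [[1., 2., 3.], [0., 5., 6.], [0., 0., 9.]]
    """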
    import paddle
    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)