#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for 
            the user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
        persistable(bool): Set the persistable flag of the created tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
	:api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    a gradient and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    an operator by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually the name does not need to be set and is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The newly created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): Whether this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually the name does not need to be set and is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')

    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.final_state_cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(x, 'x', [
        'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
        'uint8', 'uint16'
    ], 'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8', 'uint16'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type. 
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        return _C_ops.final_state_concat(input, axis)

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x, 'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat')
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type.")
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype, 'axis', ['int32', 'int64'], 'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor")

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in attr::`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums')

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray, list, tuple, float,
                                int, bool), 'assign')
    is_inplace = True if output is not None else False

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if output is None:
                if _in_legacy_dygraph():
                    output = core.VarBase()
                else:
                    output = core.eager.Tensor()
            _C_ops.assign(input, output)
        else:
            check_dtype(input.dtype, 'input', [
                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
                'uint8', 'bool'
            ], 'assign', '(When the type of input in assign is Variable.)')
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype)
            helper.append_op(
                type='assign', inputs={'X': [input]},
                outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32")
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): Whether the data should be placed on CPU. The default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of the operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for the user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain  Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
          
          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if _non_static_mode():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _C_ops.fill_constant(out, 'value',
                             float(value), 'force_cpu', force_cpu, 'dtype',
                             out.dtype, 'str_value', attrs['str_value'],
                             'shape', shape)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32',
        'int64', 'complex64', 'complex128'
    ], 'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constant provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the created Tensor is
    set to the batch_size value of the input. The stop_gradient attribute of the
    created Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor is set
            to the batch_size value of the input Tensor. The default value is 0.
        force_cpu(bool): Whether the data should be placed on CPU. The default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.final_state_full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
	:alias_main: paddle.argmin
	:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
	:old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
	:alias_main: paddle.argsort
	:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
	:old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns the sorted output
    data Variable and its corresponding index Variable, both with the same shape
    as :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for the user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
          
          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
	:alias_main: paddle.reverse
	:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
	:old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:

            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]

            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:

            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0

            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
                      If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be apply on each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)

          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
    """
    check_variable_and_dtype(
        x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse')
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it already
            exists. If it's set 'False' and the file exists, a runtime
            error will be thrown.
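
    Examples:
        A minimal illustrative sketch (the variable created below and the target
        path are assumptions, not part of this module):

        .. code-block:: python

            import paddle.fluid as fluid

            # create a variable in the default program and save it to disk
            var = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
            fluid.layers.tensor.save(var, file_path="./saved_var", overwrite=True)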
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime error
            will be raised.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
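
    Examples:
        The snippet below is a minimal static-graph sketch; the variable names
        and file path are illustrative assumptions, not part of the API.

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.tensor import load_combine

            v1 = fluid.layers.create_tensor(dtype='float32', name='v1')
            v2 = fluid.layers.create_tensor(dtype='float32', name='v2')
            load_combine([v1, v2], file_path="./saved_vars")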
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of x is infinite.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: A bool Tensor containing a single value, indicating whether there is any infinite element in x.
    
    Examples:
        .. code-block:: python
          
          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of x is NaN.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: A bool Tensor containing a single value, indicating whether there is any NaN element in x.
    
    Examples:
        .. code-block:: python
    
          import paddle
          data = paddle.randn(shape=[2,3], dtype="float32")
          res = paddle.fluid.layers.has_nan(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isnan(x)

    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """

    Test if x contains any infinity or NaN value. If all the elements are finite,
    returns True, else False.

    Args:
        x(Tensor): The Tensor to be checked.

    Returns:
        Tensor: A Tensor containing a single bool value, which is True if all elements of x are finite.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[4, 6], dtype='float32')
            y = paddle.fluid.layers.isfinite(x)
            print(y)

    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())

    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype, name=None):
    """
    This OP returns a 1-D Tensor with spaced values within a given interval.

    Values are generated into the half-open interval [``start``, ``end``) with
    the ``step`` (the interval includes ``start`` but excludes ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against
    ``end`` (illustrated by the last snippet in the Examples below).

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
            with data type int32, int64, float32, float64.
        end(float|int|Tensor): End of interval. The interval does not include
            this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
            [1], with data type int32, int64, float32, float64.
        step(float|int|Tensor): Spacing between values. For any out, it is
            the distance between two adjacent values, out[i+1] - out[i]. If
            ``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
            type int32, int64, float32, float64.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns: 
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Raises:
        TypeError: If ``dtype`` is not int32, int64, float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            out1 = fluid.layers.range(0, 10, 2, 'int32')
            # [0, 2, 4, 6, 8]
            start_var = fluid.layers.fill_constant([1], 'int64', 3)
            out2 = fluid.layers.range(start_var, 7, 1, 'int64')
            # [3, 4, 5, 6]
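
            # A hedged sketch of the epsilon advice above: the 1e-6 value is an
            # illustrative choice, not part of the API. Without it, whether 1.0
            # is included may depend on floating point rounding.
            out3 = fluid.layers.range(0.0, 1.0 + 1e-6, 0.2, 'float32')
            # approximately [0., 0.2, 0.4, 0.6, 0.8, 1.0]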

    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = cast(start, dtype)

    if not isinstance(end, Variable):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = cast(end, dtype)

    if not isinstance(step, Variable):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = cast(step, dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())

    if _in_legacy_dygraph():
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]
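    # The static shape above is only computable when start, end and step are
    # all Python scalars; otherwise out_shape stays None and the shape is
    # determined at runtime.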

    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': out})
    out.stop_gradient = True
    return out


def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is the start of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is the end of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is the number of values in the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: A 1-D Tensor with a fixed number of evenly spaced values, whose data type is set by ``dtype``; \
        the shape of this tensor is :math:`[num]` . If :attr:`num` is set to 1, the output tensor \
        only contains the value of :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]

    """
    if dtype is None:
        dtype = 'float32'
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if _in_legacy_dygraph():
        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                               dtype)
    if in_dygraph_mode():
        return _C_ops.final_state_linspace(tensor_start, tensor_stop,
                                           tensor_num, dtype)
    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(start.dtype, 'start',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(stop.dtype, 'stop',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
                'linspace')
    if ((stop_dtype == "float64" or start_dtype == "float64") and
            out_dtype in ["float32", "int32"]) or ((stop_dtype == "int64" or
                                                    start_dtype == "int64") and
                                                   out_dtype == "int32"):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace."
            .format(start_dtype, stop_dtype, dtype))

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num},
        attrs={'dtype': dtype},
        outputs={'Out': [out]})
    if isinstance(num, int):
        out.desc.set_shape((num, ))
    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has identical shape and dtype
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If it is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be the same as input :attr:`x`. If it is a tensor, the data type and shape
            need to be the same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor whose elements depend on the input data type:
            if the input data type is bool, the output values are False,
            otherwise they are zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')

    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
	:alias_main: paddle.diag
	:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
	:old_api: paddle.fluid.layers.diag
    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix with the diagonal values \
            specified by input :attr:`diagonal`. The output shape is :math:`[N, N]` and the \
            output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype='float32',
        name=None):
    """
    This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows
    if in_dygraph_mode():
        out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
                                     _current_expected_place())
    elif _in_legacy_dygraph():
        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                         num_columns)
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(dtype, 'dtype',
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
        if not isinstance(num_rows, int) or num_rows < 0:
            raise TypeError("num_rows should be a non-negative int")
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype
            },
            stop_gradient=True)

    if batch_shape is not None:
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
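        # Descriptive note: leading singleton dims are inserted so the
        # [num_rows, num_columns] identity can be tiled batch_shape times
        # (e.g. batch_shape=[3] tiles 3 copies along the new leading dim).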
        if _non_static_mode():
            out = _C_ops.reshape(out, 'shape', re_shape)
            return _C_ops.expand(out, None, 'expand_times', expand_times)

        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in (batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")

        from .nn import reshape, expand
        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)

    out.stop_gradient = True
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has identical shape and dtype 
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out


@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
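    """
    Deprecated alias kept for backward compatibility; it simply forwards to
    ``paddle.tensor.triu`` (see ``paddle.triu``).
    """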
    import paddle
    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)