#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode, _get_paddle_place
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    :api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create an
    operator by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name does not need to be set and is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)

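# A minimal usage sketch for create_parameter, kept as a comment so that
# importing this module has no side effects (hedged: variable names are
# illustrative and a static-graph program is assumed):
#
#   import paddle
#   import paddle.fluid as fluid
#   paddle.enable_static()
#   # With attr=None and is_bias=False the Xavier initializer is used by
#   # default; is_bias=True switches the default to Constant(0.0).
#   w = fluid.layers.create_parameter(shape=[64, 10], dtype='float32')
#   b = fluid.layers.create_parameter(shape=[10], dtype='float32', is_bias=True)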

def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block (block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The newly created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): Whether this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name does not need to be set and is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')

    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.final_state_cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(x, 'x', [
        'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
        'uint8', 'uint16'
    ], 'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8', 'uint16'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out

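# A minimal static-graph sketch for cast, kept as a comment so that importing
# this module has no side effects (hedged: names are illustrative):
#
#   import paddle
#   paddle.enable_static()
#   x = paddle.static.data(name='x', shape=[3], dtype='float64')
#   y = paddle.fluid.layers.cast(x, 'int32')  # y has dtype int32, same shape as x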

def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        return _C_ops.final_state_concat(input, axis)

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x, 'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat')
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type.")
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype, 'axis', ['int32', 'int64'], 'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor")

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out

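# A usage sketch showing `axis` passed as a Tensor, which the static-graph
# branch above wires in through the AxisTensor input (hedged: values are
# illustrative; kept as a comment to avoid import-time side effects):
#
#   import paddle.fluid as fluid
#   x1 = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
#   x2 = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
#   axis = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
#   out = fluid.layers.concat(input=[x1, x2], axis=axis)  # result shape [2, 6]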

def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index

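# A sketch of the use_stack=True mode, which stacks equally shaped tensors
# instead of concatenating them (hedged: mirrors the docstring example above
# and is kept as a comment to avoid import-time side effects):
#
#   import numpy as np
#   import paddle.fluid as fluid
#   x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
#   x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
#   i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
#   array = fluid.layers.create_array(dtype='float32')
#   fluid.layers.array_write(x0, i, array)
#   fluid.layers.array_write(x1, i + 1, array)
#   stacked, sizes = fluid.layers.tensor_array_to_tensor(
#       input=array, axis=0, use_stack=True)  # stacked has shape [2, 2, 2]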

def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums')

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray, list, tuple, float,
                                int, bool), 'assign')
    is_inplace = True if output is not None else False

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.final_state_assign(input)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _C_ops.assign(input, output)
        else:
            check_dtype(input.dtype, 'input', [
                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
                'uint8', 'bool'
            ], 'assign', '(When the type of input in assign is Variable.)')
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype)
            helper.append_op(
                type='assign', inputs={'X': [input]},
                outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32")
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of operation.
            if ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if _non_static_mode():
        if out is None and in_dygraph_mode():
            # Currently, the final state op does not support a pre-allocated
            # `out`, so it is only used when out is None.
            place = _current_expected_place()
            if force_cpu:
                place = core.CPUPlace()

            shape = utils.convert_shape_to_list(shape)
            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
            out = _C_ops.final_state_full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        else:
            shape = utils.convert_shape_to_list(shape)
            if out is None:
                out = _varbase_creator(dtype=dtype)

            if isinstance(value, Variable):
                if dtype in ['uint8', 'int16', 'int32', 'int64']:
                    attrs['str_value'] = str(int(value.numpy().item(0)))
                else:
                    attrs['str_value'] = str(float(value.numpy().item(0)))

            _C_ops.fill_constant(out, 'value',
                                 float(value), 'force_cpu', force_cpu, 'dtype',
                                 out.dtype, 'str_value', attrs['str_value'],
                                 'shape', shape)
            out.stop_gradient = True
            return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32',
        'int64', 'complex64', 'complex128'
    ], 'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constant provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the output is set to
    the batch size of the input. The stop_gradient attribute of the created Tensor
    is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor is set
            to the batch_size value of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.final_state_full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out

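# A sketch of the input_dim_idx / output_dim_idx behaviour: dimension
# `output_dim_idx` of the result is replaced by dimension `input_dim_idx`
# of `input` (hedged: values are illustrative; kept as a comment):
#
#   import paddle.fluid as fluid
#   like = fluid.layers.fill_constant(shape=[4, 6], value=10, dtype='int64')
#   # shape=[1, 5] with input_dim_idx=0 and output_dim_idx=0 yields a [4, 5]
#   # Tensor filled with 0, because dim 0 is copied from `like`.
#   data = fluid.layers.fill_constant_batch_size_like(
#       input=like, shape=[1, 5], value=0, dtype='int64',
#       input_dim_idx=0, output_dim_idx=0)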

def argmin(x, axis=0):
    """
    :alias_main: paddle.argmin
    :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
    :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
    :alias_main: paddle.argsort
    :alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
    :old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns the sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids

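# A sketch of descending=True, which sorts from largest to smallest along the
# chosen axis (hedged: values are illustrative; kept as a comment):
#
#   import numpy as np
#   import paddle.fluid as fluid
#   with fluid.dygraph.guard():
#       x = fluid.dygraph.to_variable(np.array([[3., 1., 2.]], dtype=np.float32))
#       values, indices = fluid.layers.argsort(input=x, axis=-1, descending=True)
#       # values  -> [[3., 2., 1.]]
#       # indices -> [[0, 2, 1]]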

def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    :alias_main: paddle.reverse
    :alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
    :old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:

            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]

            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:

            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0

            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
                      If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied on each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)

          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
    """
    check_variable_and_dtype(
        x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse')
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


1260 1261 1262 1263 1264 1265 1266
def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(Variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime error
            will be raised.
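
    Examples:

        A minimal static-graph sketch; the variable, the file path ``output``,
        and the assumption that ``save`` is reachable as ``fluid.layers.save``
        are illustrative only, and an executor still has to run the program
        before the save op actually writes the file.

        .. code-block:: python

            import paddle.fluid as fluid

            v = fluid.layers.data(name="data", shape=(4, 6), dtype="float32")
            fluid.layers.save(v, file_path="output")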
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime error
            will be raised.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
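
    Examples:

        A minimal sketch mirroring the ``save_combine`` example above; it
        assumes the file ``output`` was produced by ``save_combine`` and that
        the listed variables match what was saved there.

        .. code-block:: python

            import paddle.fluid as fluid

            v1 = fluid.layers.create_tensor(dtype="float32")
            v2 = fluid.layers.create_tensor(dtype="float32")
            fluid.layers.load_combine([v1, v2], file_path="output")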
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of ``x`` is an infinity number.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor storing the output, a single bool value indicating whether there is any infinity number in ``x`` or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of ``x`` is a NaN.

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor variable storing the output, a single bool value indicating whether there is any NaN in ``x`` or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[2, 3], dtype="float32")
          res = paddle.fluid.layers.has_nan(data)
          # [False]

    """
    if _non_static_mode():
        return _C_ops.isnan(x)

    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """
    Test if ``x`` contains any infinity or NaN value. Returns True if all
    the elements are finite, otherwise False.

    Args:
        x(Tensor): The Tensor to be checked.

    Returns:
        Tensor: The tensor storing the output, a single bool value.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[4, 6], dtype='float32')
            y = paddle.fluid.layers.isfinite(x)
            print(y)

    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())

    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype, name=None):
    """
    This OP returns a 1-D Tensor with spaced values within a given interval.

    Values are generated into the half-open interval [``start``, ``end``) with
    the ``step`` (the interval includes ``start`` but excludes ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
            with data type int32, int64, float32, float64.
        end(float|int|Tensor): End of interval. The interval does not include
            this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
            [1], with data type int32, int64, float32, float64.
        step(float|int|Tensor): Spacing between values. For any out, it is
            the distance between two adjacent values, out[i+1] - out[i]. If
            ``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
            type int32, int64, float32, float64.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Raises:
        TypeError: If ``dtype`` is not int32, int64, float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            out1 = fluid.layers.range(0, 10, 2, 'int32')
            # [0, 2, 4, 6, 8]

            start_var = fluid.layers.fill_constant([1], 'int64', 3)
            out2 = fluid.layers.range(start_var, 7, 1, 'int64')
            # [3, 4, 5, 6]
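
            # A usage sketch of the epsilon advice above (illustrative values):
            # without the small epsilon added to ``end``, float32 rounding may
            # drop the final value 1.0 from the result.
            out3 = fluid.layers.range(0.0, 1.0 + 1e-6, 0.2, 'float32')
            # [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]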

    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = cast(start, dtype)

    if not isinstance(end, Variable):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = cast(end, dtype)

    if not isinstance(step, Variable):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = cast(step, dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())

    if _in_legacy_dygraph():
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out

    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]

    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': out})
    out.stop_gradient = True
    return out


def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is the start value of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is the end value of the range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is the given number of values in the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: the output data type will be float32, float64. The 1-D tensor with fixed number of evenly spaced values, \
        the data shape of this tensor is :math:`[num]` . If the :attr:`num` is set 1, the output tensor just has \
        the value with input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]

    """
    if dtype is None:
        dtype = 'float32'
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if _in_legacy_dygraph():
        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                               dtype)
    if in_dygraph_mode():
        return _C_ops.final_state_linspace(tensor_start, tensor_stop,
                                           tensor_num, dtype)
    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(start.dtype, 'start',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(stop.dtype, 'stop',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
                'linspace')
    if ((stop_dtype == "float64" or start_dtype == "float64") and
            out_dtype in ["float32", "int32"]) or ((stop_dtype == "int64" or
                                                    start_dtype == "int64") and
                                                   out_dtype == "int32"):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace."
            .format(start_dtype, stop_dtype, dtype))

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num},
        attrs={'dtype': dtype},
        outputs={'Out': [out]})
    if isinstance(num, int):
        out.desc.set_shape((num, ))
    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If it is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be the same as input :attr:`x`. If it is a tensor, the data type and shape
            need to be the same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor, the element in tensor is related to input
            data type, if the input data type is bool, the output value is
            False, otherwise is zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')

    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
	:alias_main: paddle.diag
	:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
	:old_api: paddle.fluid.layers.diag

    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix, with the diagonal values specified by input :attr:`diagonal`. \
            The output data type is the same as the input data type, and the output shape is :math:`[N, N]` with two dims.

    Examples:
        .. code-block:: python

          # [[3, 0, 0],
          #  [0, 4, 0],
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype='float32',
        name=None):
    """
    This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows

    if in_dygraph_mode():
        out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
                                     _current_expected_place())
    elif _in_legacy_dygraph():
        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                         num_columns)
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(dtype, 'dtype',
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
        if not isinstance(num_rows, int) or num_rows < 0:
            raise TypeError("num_rows should be a non-negative int")
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype
            },
            stop_gradient=True)

    if batch_shape is not None:
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
        if _non_static_mode():
            out = _C_ops.reshape(out, 'shape', re_shape)
            return _C_ops.expand(out, None, 'expand_times', expand_times)

        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in (batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")

        from .nn import reshape, expand
        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)

    out.stop_gradient = True
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out


@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
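    """
    Return the upper triangular part of ``input`` (the elements on and above
    the ``diagonal``-th diagonal), with the other elements set to 0. This is a
    deprecated thin wrapper that forwards to :func:`paddle.tensor.triu`.

    The example below is a minimal sketch of the expected behaviour; the
    values are illustrative only.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            data = fluid.layers.assign(np.arange(1, 10, dtype='int32').reshape(3, 3))
            out = fluid.layers.triu(data)
            # [[1, 2, 3],
            #  [0, 5, 6],
            #  [0, 0, 9]]
    """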
    import paddle
    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)