#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
import numpy
import warnings

__all__ = [
    'create_tensor', 'create_parameter', 'create_global_var', 'cast',
    'tensor_array_to_tensor', 'concat', 'sums', 'assign',
    'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
    'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite',
    'range', 'linspace', 'zeros_like', 'ones_like', 'diag', 'eye'
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    an operator by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set this; it is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            W = layers.create_parameter(shape=[784, 200], dtype='float32')
    """
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape, dtype, is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block (block 0).

    Parameters:
        shape (list of int): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set this; it is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                          persistable=True, force_cpu=True, name='new_var')
    """
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """
    This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. Casting to the same dtype is a no-op,
    but it is allowed.

    Args:
        x(Variable): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Variable: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            place = fluid.core.CPUPlace()

            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            x_i_lod = fluid.core.LoDTensor()
            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
            x_i_lod.set_recursive_sequence_lengths([[0,2]])
            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
            print(np.array(res1[0]), np.array(res1[0]).dtype)
            # [[  1 254]
            #  [  0   4]] uint8
            print(np.array(res2[0]), np.array(res2[0]).dtype)
            # [[ 1 -2]
            #  [ 0  4]] int32
    """
    helper = LayerHelper('cast', **locals())
    check_variable_and_dtype(
        x, 'x',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'cast')
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    **Concat**

    This OP concatenates the input along the axis.

    Args:
        input(list): List of input Tensors with data type float32, float64, int32,
            int64.
        axis(int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. Axis along which to concatenate the inputs. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis + R. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1,2,3],
                            [4,5,6]])
            in2 = np.array([[11,12,13],
                            [14,15,16]])
            in3 = np.array([[21,22],
                            [23,24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1,x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            assert axis.shape == (
                1, ), "axis of type Variable should have shape [1]"
            axis = axis[0]
        return core.ops.concat(input, 'axis', axis)

    if not isinstance(input, list):
        warnings.warn(
            "The type of input in concat should be list, but received %s." %
            (type(input)))
        input = [input]
    for id, x in enumerate(input):
        check_variable_and_dtype(
            x, 'input[' + str(id) + ']',
            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
    check_type(axis, 'axis', (int, Variable), 'concat')

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
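        # A LoDTensorArray input is lowered to the tensor_array_to_tensor op;
        # only a single LoDTensorArray element is supported by this path.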
        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                            "number of the elements must be 1, but received %s." % len(x)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    """
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if in_dygraph_mode():
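        # In dygraph mode the LoDTensorArray is just a Python list of tensors,
        # so the op is emulated with stack/concat and the per-tensor sizes
        # along `axis` are computed eagerly.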
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index


def sums(input, out=None):
    """
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])
            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """
    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Variable|numpy.ndarray): A tensor or numpy ndarray, its data type supports
            float32, float64, int32 and int64.
        output (Variable, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Variable: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result1 = fluid.layers.create_tensor(dtype='float64')
          fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result2 = fluid.layers.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
    if isinstance(input, Variable):
        check_dtype(input.dtype, 'input',
                    ['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
                    '(When the type of input in assign is Variable.)')
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        dtype = convert_np_dtype_to_dtype_(input.dtype)
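        # A numpy input is baked into the program as an assign_value op; the
        # attribute name holding the flattened values depends on the dtype.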
        if dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
                The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
                the elements of it should be integers or Tensors with shape [1].
                If ``shape`` is a Variable, it should be a 1-D Tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
            be float16, float32, float64, int32, int64.
        value(float): The constant value used to initialize the Tensor to be created.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.

    Returns:
        Variable: Tensor which is created according to shape and dtype.

    Raises:
        TypeError: The dtype must be one of bool, float16, float32, float64, int32 and int64,
            and the data type of the out Tensor must be the same as the dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Variable Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Variable Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]

          # attr shape is an Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
    """
    attrs = {'value': float(value), 'force_cpu': force_cpu}

    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))

    if in_dygraph_mode():
        if isinstance(shape, (list, tuple)):
            shape = list(
                map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
                    shape))
        else:
            shape = list(shape.numpy().astype(int))
        if out is None:
            out = _varbase_creator(dtype=dtype)
        core.ops.fill_constant(out, 'value',
                               float(value), 'force_cpu', force_cpu, 'dtype',
                               out.dtype, 'str_value', attrs['str_value'],
                               'shape', shape)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
    inputs = {}
    attrs = {'value': float(value), 'force_cpu': force_cpu}

    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))

L
liym27 已提交
619 620 621 622 623 624 625 626 627 628 629
    def _get_attr_shape(list_shape):
        attr_shape = []
        for idx, dim in enumerate(list_shape):
            if isinstance(dim, Variable):
                attr_shape.append(-1)
            else:
                attr_shape.append(dim)
        return attr_shape

    def _get_shape_tensor(list_shape):
        new_shape_tensor = []
630
        for idx, dim in enumerate(list_shape):
L
liym27 已提交
631 632
            if isinstance(dim, Variable):
                dim.stop_gradient = True
633 634 635 636
                check_dtype(
                    dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'],
                    'fill_constant',
                    '(When type of shape in fill_constant is list or tuple.)')
637 638
                if convert_dtype(dim.dtype) == 'int64':
                    dim = cast(x=dim, dtype='int32')
L
liym27 已提交
639 640 641 642 643 644 645 646 647
                new_shape_tensor.append(dim)
            else:
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
        return new_shape_tensor

    if isinstance(shape, Variable):
        shape.stop_gradient = True
648 649
        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant',
                    '(When type of shape in fill_constant is Variable.)')
650 651
        if (convert_dtype(shape.dtype) == 'int64'):
            shape = cast(shape, 'int32')
L
liym27 已提交
652 653 654 655 656 657
        inputs["ShapeTensor"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, (
            "The size of 'shape' in fill_constant can't be zero, "
            "but received %s." % len(shape))
        attrs["shape"] = _get_attr_shape(shape)
L
Leo Chen 已提交
658
        if utils._contain_var(shape):
L
liym27 已提交
659 660
            inputs['ShapeTensorList'] = _get_shape_tensor(shape)

Y
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    else:
        check_dtype(
            dtype, 'create data type',
            convert_dtype(out.dtype), 'fill_constant',
            '(The create data type in fill_constant must be the same with out data type.)'
        )
liym27 已提交
669
    attrs['dtype'] = out.dtype
Y
Yu Yang 已提交
670 671
    helper.append_op(
        type='fill_constant',
L
liym27 已提交
672
        inputs=inputs,
Y
Yu Yang 已提交
673
        outputs={'Out': [out]},
L
liym27 已提交
674
        attrs=attrs,
M
minqiyang 已提交
675
        stop_gradient=True)
Y
Yu Yang 已提交
676 677 678 679
    out.stop_gradient = True
    return out


Y
yuyang18 已提交
680
@templatedoc()
Y
Yu Yang 已提交
681 682 683 684 685
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
G
Guo Sheng 已提交
686 687
                                  output_dim_idx=0,
                                  force_cpu=False):
688
    """
T
tianshuo78520a 已提交
689
    This OP creates a Tesnor according the shape and dtype, and initializes the
W
wangchaochaohu 已提交
690 691 692 693
    Tensor with the constants provided in ``value``. When the input is LoDTensor
    and the input_dim_idx is 0, the output_dim_idx dimension is set to the value
    of the batch_size input by the input, the Stop_gradient attribute of the created
    Tensor is False by default.
694 695

    Args:
W
wangchaochaohu 已提交
696 697 698 699 700 701 702 703 704 705 706
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
            the value of batch_size of input Tensor. The default value is 0.
T
tianshuo78520a 已提交
707
        force_cpu(bool): data should be on CPU if it's true, default value is False.
Y
yuyang18 已提交
708 709

    Returns:
W
wangchaochaohu 已提交
710
        Variable: Tensor which will be created according to dtype.
H
haowang101779990 已提交
711 712 713 714 715

    Examples:

        .. code-block:: python

716
             import paddle.fluid as fluid
W
wangchaochaohu 已提交
717
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
W
wangchaochaohu 已提交
718
             data = fluid.layers.fill_constant_batch_size_like(
W
wangchaochaohu 已提交
719
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
H
haowang101779990 已提交
720

721
    """
Y
Yu Yang 已提交
722
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
X
Xin Pan 已提交
723
    out = helper.create_variable_for_type_inference(dtype=dtype)
724 725 726 727 728 729
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
730
        'force_cpu': force_cpu
731 732 733 734 735
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
Y
Yu Yang 已提交
736 737 738 739
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
740
        attrs=attrs)
Y
Yu Yang 已提交
741 742 743 744
    out.stop_gradient = True
    return out


S
sneaxiy 已提交
745 746 747 748
def argmin(x, axis=0):
    """
    **argmin**

749 750
    This OP computes the indices of the min elements of the input tensor's
    element along the provided axis.
S
sneaxiy 已提交
751 752

    Args:
753 754 755 756 757
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.
F
fengjiayi 已提交
758

S
sneaxiy 已提交
759
    Returns:
760
        Variable: A Tensor with data type int64.
F
fengjiayi 已提交
761

S
sneaxiy 已提交
762 763
    Examples:
        .. code-block:: python
F
fengjiayi 已提交
764

765
            import paddle.fluid as fluid
766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the max elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis + R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
    This OP sorts the input along the given axis, and returns the sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis along which to sort the input Tensor. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis + R. Default is -1.
        descending(bool, optional): Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.

    Returns:
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
    """
    assert isinstance(shape, list) or isinstance(
        shape, tuple), "The shape's type should be list or tuple."
    assert reduce(lambda x, y: x * y,
                  shape) > 0, "The shape is invalid: %s." % (str(shape))
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.

    Returns:
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
    """
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'zeros')
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    Parameters:
        x (Variable): A tensor to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied on each axis in the tuple or list.

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
    """
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not cover the given file when it has already
            existed. If it's set 'False' and the file is existed, a runtime
            error will be thrown.
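
    Examples:
        .. code-block:: python

            # A minimal sketch; the variable and file names are illustrative only.
            # `save` is not listed in __all__, so it is imported directly from
            # this module here.
            import paddle.fluid as fluid
            from paddle.fluid.layers.tensor import save

            var = fluid.layers.create_global_var(
                shape=[2, 3], value=1.0, dtype='float32',
                persistable=True, name='var_to_save')
            save(var, file_path="./var_to_save", overwrite=True)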
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"X": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not cover the given file when it has already
            existed. If it's set 'False' and the file is existed, a runtime
            error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        args={"file_path": file_path,
              "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
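
    Examples:
        .. code-block:: python

            # A minimal sketch; the file path is illustrative and must point to a
            # file produced by save_combine. `load_combine` is not listed in
            # __all__, so it is imported directly from this module here.
            import paddle.fluid as fluid
            from paddle.fluid.layers.tensor import load_combine

            v1 = fluid.layers.create_tensor(dtype='float32', name='v1', persistable=True)
            v2 = fluid.layers.create_tensor(dtype='float32', name='v2', persistable=True)
            load_combine(out=[v1, v2], file_path="./saved_vars")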
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        output={"Out": out},
        args={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of x is an infinity number.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: The tensor variable storing the output, only a bool value, indicating whether there is any infinity number in x or not.
    
    Examples:
        .. code-block:: python
          
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_inf(data)

    """
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of x is NaN.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: The tensor variable storing the output, only a bool value, indicating whether there is any NaN in x or not.
    
    Examples:
        .. code-block:: python
    
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_nan(data)

    """
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """
    Test whether all elements of x are finite (no infinity/NaN values). If all
    the elements are finite, returns true, else false.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, contains a bool value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            var = fluid.layers.data(name="data",
                                    shape=(4, 6),
                                    dtype="float32")
            out = fluid.layers.isfinite(var)
    """
    helper = LayerHelper("isfinite", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype):
    """
    Return evenly spaced values within a given interval.

    Values are generated within the half-open interval [start, stop) (in other words,
    the interval including start but excluding stop).

    Parameters:
        start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
            when start is Variable, it is a 1-D Tensor with shape [1].
        end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
                                 value, except in some cases where step is not an integer
                                 and floating point round-off affects the length of out. When end is Variable,
                                 it is a 1-D Tensor with shape [1].
        step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
                                  distance between two adjacent values, out[i+1] - out[i].
        dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.


    Returns: a 1-D Tensor which is evenly spaced values within a given interval. Its data type is set by dtype.
    
    Return type: Variable

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             data = fluid.layers.range(0, 10, 2, 'int32')

    """
    helper = LayerHelper("range", **locals())

    check_dtype(dtype, 'create data type',
                ['float32', 'float64', 'int32', 'int64'], 'range')

    dtype = convert_dtype(dtype)
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    elif convert_dtype(start.dtype) != dtype:
        # make sure that start, end, step has the same dtype as
        # `dtype`
        start = cast(x=start, dtype=dtype)

    if not isinstance(end, Variable):
        end = fill_constant([1], dtype, end)
    elif convert_dtype(end.dtype) != dtype:
        end = cast(x=end, dtype=dtype)

    if not isinstance(step, Variable):
        step = fill_constant([1], dtype, step)
    elif convert_dtype(step.dtype) != dtype:
        step = cast(x=step, dtype=dtype)

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def linspace(start, stop, num, dtype):
    """
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(float|Variable): The input :attr:`start` is start variable of range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        stop(float|Variable): The input :attr:`stop` is end variable of range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        num(int|Variable): The input :attr:`num` is given num of the sequence. It is an int scalar, \
            or a tensor of shape [1] with type int32.
        dtype(string): The data type of output tensor, it could be 'float32' and 'float64'.

    Returns:
        Variable: The 1-D tensor with a fixed number of evenly spaced values, with data type float32 or float64 \
        as set by :attr:`dtype`. The shape of this tensor is :math:`[num]` . If :attr:`num` is set to 1, the output \
        tensor only contains the value of :attr:`start`. 

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0]
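             # A hedged sketch (not part of the original example): start/stop may also
             # be 1-D Tensors of shape [1], built here with fill_constant.
             start_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
             stop_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=10.0)
             data_var = fluid.layers.linspace(start_var, stop_var, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]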

    """
    helper = LayerHelper("linspace", **locals())

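    # Scalar start/stop are wrapped into 1-D tensors of shape [1] of the requested
    # dtype, and num into an int32 tensor, so the linspace op always receives tensors.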
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        num = fill_constant([1], 'int32', num)

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': start,
                'Stop': stop,
                'Num': num},
        outputs={'Out': [out]})
    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If it is :attr:`None` , the op will create the variable as output, the data type and shape of \
            this variable will be the same as input :attr:`x`. If it is a tensor, the data type and shape need to be the same as input :attr:`x`. 
            The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor whose element values follow the input data type: if the input data type is bool, \
            the output values are False, otherwise they are zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]
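          # A hedged sketch (not part of the original example): for a bool input the
          # filled value is False rather than 0, as noted in Returns above.
          x_bool = fluid.data(name='x_bool', dtype='bool', shape=[3])
          data_bool = fluid.layers.zeros_like(x_bool) # [False, False, False]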

    """

    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
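    # fill_zeros_like produces an output with the same shape and dtype as X, filled
    # with zeros (False when the input dtype is bool).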
    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def diag(diagonal):
    """
    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input should be a 1D tensor of shape :math:`[N]` , whose elements \
            specify the diagonal values. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix with the diagonal values specified by input :attr:`diagonal`. \
            The output data type is the same as the input data type, and the output shape is :math:`[N, N]` .

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)
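          # A hedged sketch (not part of the original example): a 1-D Variable may also
          # be passed as the diagonal; assign builds one from a numpy array here.
          diag_var = fluid.layers.assign(np.array([1.0, 2.0, 3.0], dtype='float32'))
          data_var = fluid.layers.diag(diag_var) # 3 x 3 matrix with 1.0, 2.0, 3.0 on the diagonal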

    """

    helper = LayerHelper("diag", **locals())

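    # A numpy.ndarray diagonal is first materialized as a Variable via assign, so the
    # diag op always receives a tensor input.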
    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    """
    **eye**

    This function constructs an identity tensor, or a batch of identity tensors.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int): the number of columns in each batch tensor.
                          If None, default: num_rows.
        batch_shape(list(int)): If provided, the returned tensor will have a leading
                                batch size of this shape.
        dtype(string): The data type of the returned tensor.
                       It should be int32, int64, float16, float32, float64.

    Returns:
        Variable: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    helper = LayerHelper("eye", **locals())
    if not isinstance(num_rows, int) or num_rows < 0:
        raise TypeError("num_rows should be a non-negative int")
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows
    out = helper.create_variable_for_type_inference(dtype=dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='eye',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'num_rows': num_rows,
            'num_columns': num_columns,
            'dtype': c_dtype
        },
        stop_gradient=True)
    out.stop_gradient = True

    if batch_shape is not None:
        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        from .nn import stack
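        # Stack copies of the current result along a new leading axis for each batch
        # dimension, innermost first, so the final shape is
        # batch_shape + [num_rows, num_columns].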
        for batch_val in reversed(batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")
            else:
                stack_vars = [out for _ in numpy.arange(batch_val)]
                out = stack(stack_vars, axis=0)
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable, optional): If set, the result is written into this tensor; otherwise the op creates a new \
            variable as output. The data type and shape should be the same as input :attr:`x`. Default: :attr:`None` .

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]
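          # A hedged sketch (not part of the original example): the result mirrors the
          # shape and dtype of its input, here an int64 tensor built with fluid.data.
          y = fluid.data(name='y', dtype='int64', shape=[2, 3])
          data2 = fluid.layers.ones_like(y) # a [2, 3] tensor filled with 1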

    """

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out