#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
import numpy
import warnings

__all__ = [
    'create_tensor', 'create_parameter', 'create_global_var', 'cast',
    'tensor_array_to_tensor', 'concat', 'sums', 'assign',
    'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
    'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite',
    'range', 'linspace', 'zeros_like', 'ones_like', 'diag', 'eye'
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for 
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operators by yourself instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set it, and it is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            W = layers.create_parameter(shape=[784, 200], dtype='float32')
    """
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape, dtype, is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list of int): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set it, and it is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """
    This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Variable): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Variable: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            place = fluid.core.CPUPlace()

            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            x_i_lod = fluid.core.LoDTensor()
            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
            x_i_lod.set_recursive_sequence_lengths([[0,2]])
            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
            print(np.array(res1[0]), np.array(res1[0]).dtype)
            # [[  1 254]
            #  [  0   4]] uint8
            print(np.array(res2[0]), np.array(res2[0]).dtype)
            # [[ 1 -2]
            #  [ 0  4]] int32
    """
    check_variable_and_dtype(
        x, 'x',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64',
        'uint8'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    **Concat**

    This OP concatenates the input along the axis.

    Args:
        input(list): List of input Tensors with data type float32, float64, int32,
            int64.
        axis(int32|Variable, optional):  A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. The axis along which the inputs are concatenated. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1,2,3],
                            [4,5,6]])
            in2 = np.array([[11,12,13],
                            [14,15,16]])
            in3 = np.array([[21,22],
                            [23,24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1,x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            assert axis.shape == (
                1, ), "axis of type Variable should have shape [1]"
            axis = axis[0]
        return core.ops.concat(input, 'axis', axis)

    if not isinstance(input, list):
        warnings.warn(
            "The type of input in concat should be list, but received %s." %
            (type(input)))
        input = [input]
    for id, x in enumerate(input):
        check_variable_and_dtype(
            x, 'input[' + str(id) + ']',
            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
    check_type(axis, 'axis', (int, Variable), 'concat')

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                            "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    """
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains the sizes of all the input tensors along the given axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if in_dygraph_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index


def sums(input, out=None):
    """
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """
    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Variable|numpy.ndarray): A tensor or numpy ndarray, its data type supports
            float32, float64, int32 and int64.
        output (Variable, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Variable: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result1 = fluid.layers.create_tensor(dtype='float64')
          fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result2 = fluid.layers.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
    if isinstance(input, Variable):
        check_dtype(input.dtype, 'input',
                    ['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
                    '(When the type of input in assign is Variable.)')
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
                The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
                the elements of it should be integers or Tensors with shape [1].
                If ``shape`` is a Variable, it should be a 1-D Tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
            be float16, float32, float64, int32, int64.
        value(float16|float32|float64|int32|int64|Variable): The constant value used to initialize 
            the Tensor to be created. If value is a Variable, it should be a 1-D Tensor.
        force_cpu(bool): data should be on CPU if it's true, default value is False.
        out(Variable, optional): Optional output which can be any created 
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.

    Returns:
        Variable: Tensor which is created according to shape and dtype.

    Raise:
        TypeError: The dtype must be one of bool, float16, float32, float64, int32 and int64
        and the data type of out Tensor must be the same as the dtype. 

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Variable Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Variable Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]

          # attr shape is an Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
          
          # attr value is an Variable Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """
    inputs = {}
    attrs = {'force_cpu': force_cpu}
    if isinstance(value, Variable):
        inputs['ValueTensor'] = value
    else:
        attrs['value'] = float(value)
        if convert_dtype(dtype) in ['int64', 'int32']:
            attrs['str_value'] = str(int(value))
        else:
            attrs['str_value'] = str(float(value))

    if in_dygraph_mode():
        if isinstance(shape, (list, tuple)):
            shape = list(
                map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
                    shape))
        else:
            shape = list(shape.numpy().astype(int))
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if convert_dtype(dtype) in ['int64', 'int32']:
                attrs['str_value'] = str(int(value.numpy()))
            else:
                attrs['str_value'] = str(float(value.numpy()))

        core.ops.fill_constant(out, 'value',
                               float(value), 'force_cpu', force_cpu, 'dtype',
                               out.dtype, 'str_value', attrs['str_value'],
                               'shape', shape)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
    inputs = utils._get_shape_tensor_inputs(
        inputs=inputs,
        helper=helper,
        attrs=attrs,
        shape=shape,
        op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    else:
        check_dtype(
            dtype, 'create data type',
            convert_dtype(out.dtype), 'fill_constant',
            '(The create data type in fill_constant must be the same with out data type.)'
        )
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out


@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constant provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the created Tensor is
    set to the batch size of the input. The stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor is set to
            the batch_size value of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
    This OP sorts the input along the given axis, and returns sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis along which to sort the input Tensor. The effective range
            is [-R, R), where R is Rank(input). When axis < 0, it works the same way
            as axis + R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.

    Returns:
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
    """
    assert isinstance(shape, list) or isinstance(
        shape, tuple), "The shape's type should be list or tuple."
    assert reduce(lambda x, y: x * y,
                  shape) > 0, "The shape is invalid: %s." % (str(shape))
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.

    Returns:
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
    """
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'zeros')
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    Parameters:
        x (Variable): A tensor to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied on each axis in the tuple or list.

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
    """
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it already
            exists. If it is set to 'False' and the file already exists, a runtime
            error will be thrown.
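
    Examples:
        A minimal usage sketch (it assumes this helper is reachable as
        ``fluid.layers.save``; the tensor and file name below are illustrative):

        .. code-block:: python

            import paddle.fluid as fluid

            # create a small constant tensor and persist it to disk
            data = fluid.layers.fill_constant(shape=[2, 2], value=1.0, dtype='float32')
            fluid.layers.save(data, file_path="./saved_tensor", overwrite=True)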
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
              "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it already
            exists. If it is set to 'False' and the file already exists, a runtime
            error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
              "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
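
    Examples:
        A minimal usage sketch (it assumes this helper is reachable as
        ``fluid.layers.load_combine`` and that the file "output" was produced
        earlier by ``save_combine``; variable names are illustrative):

        .. code-block:: python

            import paddle.fluid as fluid

            # placeholders that will receive the loaded values
            v1 = fluid.layers.create_tensor(dtype="float32", name="v1")
            v2 = fluid.layers.create_tensor(dtype="float32", name="v2")
            fluid.layers.load_combine(out=[v1, v2], file_path="output")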
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of x contains infinity.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: A tensor variable holding a single bool value that indicates whether there is any infinity value in x.
    
    Examples:
        .. code-block:: python
          
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_inf(data)

    """
    # check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of x contains NaN.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: A tensor variable holding a single bool value that indicates whether there is any NaN value in x.
    
    Examples:
        .. code-block:: python
    
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_nan(data)

    """
    # check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """
    Test whether all elements of x are finite. If all the elements are finite,
    returns true, else false.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, contains a bool value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            var = fluid.layers.data(name="data",
                                    shape=(4, 6),
                                    dtype="float32")
            out = fluid.layers.isfinite(var)
    """
    helper = LayerHelper("isfinite", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype):
    """
    Return evenly spaced values within a given interval.

    Values are generated within the half-open interval [start, stop) (in other words,
    the interval including start but excluding stop).

L
1189 1190 1191 1192
    Parameters:
        start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
            when start is Variable, it is a 1-D Tensor with shape [1].
        end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
W
whs 已提交
1193
                                 value, except in some cases where step is not an integer
L
1194 1195 1196
                                 and floating point round-off affects the length of out. When end is Variable,
                                 it is a 1-D Tensor with shape [1].
        step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
W
whs 已提交
1197
                                  distance between two adjacent values, out[i+1] - out[i].
1198
        dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.
W
whs 已提交
1199

L
1200 1201 1202
    Returns: a 1-D Tensor which is evenly spaced values within a given interval. Its data type is set by dtype.
    
    Return type: Variable
W
whs 已提交
1203 1204 1205 1206 1207

    examples:

        .. code-block:: python

1208
             import paddle.fluid as fluid
W
whs 已提交
1209 1210 1211 1212 1213
             data = fluid.layers.range(0, 10, 2, 'int32')

    """
    helper = LayerHelper("range", **locals())

1214 1215 1216 1217
    check_dtype(dtype, 'create data type',
                ['float32', 'float64', 'int32', 'int64'], 'range')

    dtype = convert_dtype(dtype)
W
whs 已提交
1218 1219
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
1220 1221 1222 1223 1224
    elif convert_dtype(start.dtype) != dtype:
        # make sure that start, end, step has the same dtype as
        # `dtype`
        start = cast(x=start, dtype=dtype)

W
whs 已提交
1225 1226
    if not isinstance(end, Variable):
        end = fill_constant([1], dtype, end)
1227 1228 1229
    elif convert_dtype(end.dtype) != dtype:
        end = cast(x=end, dtype=dtype)

W
whs 已提交
1230 1231
    if not isinstance(step, Variable):
        step = fill_constant([1], dtype, step)
1232 1233
    elif convert_dtype(step.dtype) != dtype:
        step = cast(x=step, dtype=dtype)
W
whs 已提交
1234 1235 1236 1237 1238 1239 1240 1241 1242

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': [out]})
1243
    out.stop_gradient = True
W
whs 已提交
1244
    return out
Z
zhoukunsheng 已提交
1245 1246


Z
zhoukunsheng 已提交
1247 1248
def linspace(start, stop, num, dtype):
    """
1249
    This OP return fixed number of evenly spaced values within a given interval.
Z
zhoukunsheng 已提交
1250 1251

    Args:
1252 1253 1254 1255 1256 1257 1258
        start(float|Variable): The input :attr:`start` is start variable of range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        stop(float|Variable): The input :attr:`stop` is start variable of range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        num(int|Variable): The input :attr:`num` is given num of the sequence. It is an int scalar, \
            or a tensor of shape [1] with type int32.
        dtype(string): The data type of output tensor, it could be 'float32' and 'float64'.
Z
zhoukunsheng 已提交
1259 1260

    Returns:
1261 1262 1263
        Variable, the output data type will be float32, float64.: The 1-D tensor with fixed number of evenly spaced values, \
        the data shape of this tensor is :math:`[num]` . If the :attr:`num` is set 1, the output tensor just has \
        the value with input :attr:`start`. 
Z
zhoukunsheng 已提交
1264

Z
zhoukunsheng 已提交
1265
    Examples:
Z
zhoukunsheng 已提交
1266 1267
        .. code-block:: python

1268
             import paddle.fluid as fluid
Z
zhoukunsheng 已提交
1269 1270
             data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0]
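             # start, stop and num may also be passed as shape-[1] tensors; a minimal
             # sketch using fluid.layers.fill_constant for the start value:
             start_t = fluid.layers.fill_constant([1], 'float32', 0)
             data = fluid.layers.linspace(start_t, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]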

    """
    helper = LayerHelper("linspace", **locals())

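    # wrap plain Python scalars into shape-[1] tensors so the linspace op
    # always receives Variables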
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        num = fill_constant([1], 'int32', num)

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': start,
                'Stop': stop,
                'Num': num},
        outputs={'Out': [out]})
    return out


def zeros_like(x, out=None):
    """
    This OP creates a tensor of zeros that has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If set to :attr:`None` , the op will create a variable as output; the data type and shape of \
            this variable will be the same as input :attr:`x` . If :attr:`out` is a tensor, its data type and shape must be the same as input :attr:`x` . \
            The default value is :attr:`None` .

    Returns:
        Variable: An N-D tensor whose elements depend on the input data type: if the input data type is bool, \
            every element is False, otherwise every element is zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
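    # the fill_zeros_like op fills `out` with zeros (False for bool inputs),
    # keeping the shape and dtype of x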
    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def diag(diagonal):
    """
    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input should be a 1-D tensor of shape :math:`[N]` that \
            specifies the diagonal values. The input data type should be float32, float64, int32 or int64.

    Returns:
        Variable: The tensor variable storing the square matrix whose diagonal values are specified by input :attr:`diagonal`; \
            the output data type is the same as the input data type and the output shape is :math:`[N, N]` .

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5]]

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

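    # a numpy.ndarray diagonal is first copied into a Variable via assign()
    # so the diag op can consume it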
    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    """
    **eye**

    This function constructs an identity tensor, or a batch of identity tensors.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int): the number of columns in each batch tensor.
                          If None, it defaults to num_rows.
        batch_shape(list(int)): If provided, the returned tensor will have a leading
                                batch size of this shape.
        dtype(string): The data type of the returned tensor.
                       It should be int32, int64, float16, float32, float64.

    Returns:
        Variable: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    helper = LayerHelper("eye", **locals())
    if not isinstance(num_rows, int) or num_rows < 0:
        raise TypeError("num_rows should be a non-negative int")
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows
    out = helper.create_variable_for_type_inference(dtype=dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='eye',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'num_rows': num_rows,
            'num_columns': num_columns,
            'dtype': c_dtype
        },
        stop_gradient=True)
    out.stop_gradient = True

    if batch_shape is not None:
        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        from .nn import stack
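        # build the batch dims from innermost to outermost: each pass stacks
        # `batch_val` copies of the current tensor along a new leading axis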
        for batch_val in reversed(batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")
            else:
                stack_vars = [out for _ in numpy.arange(batch_val)]
                out = stack(stack_vars, axis=0)
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a tensor of ones with the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies the shape and dtype.
        out(Variable, optional): The output tensor. If set to :attr:`None` , a new variable will be created as output. Default: None.

    Returns:
        Variable: The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
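    # the fill_any_like op fills `out` with the constant 1.0, keeping the
    # shape and dtype of x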
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out